mirror of
https://github.com/Relintai/pmlpp.git
synced 2024-11-08 13:12:09 +01:00
Added old versions for all remaining classes.
This commit is contained in:
parent
14c0cede56
commit
e8d0b13eed
12
SCsub
12
SCsub
@ -63,6 +63,18 @@ sources = [
|
||||
"mlpp/softmax_reg/softmax_reg_old.cpp",
|
||||
"mlpp/auto_encoder/auto_encoder_old.cpp",
|
||||
"mlpp/tanh_reg/tanh_reg_old.cpp",
|
||||
"mlpp/softmax_net/softmax_net_old.cpp",
|
||||
"mlpp/multinomial_nb/multinomial_nb_old.cpp",
|
||||
"mlpp/mann/mann_old.cpp",
|
||||
"mlpp/log_reg/log_reg_old.cpp",
|
||||
"mlpp/lin_reg/lin_reg_old.cpp",
|
||||
"mlpp/gaussian_nb/gaussian_nb_old.cpp",
|
||||
"mlpp/gan/gan_old.cpp",
|
||||
"mlpp/exp_reg/exp_reg_old.cpp",
|
||||
"mlpp/dual_svc/dual_svc_old.cpp",
|
||||
"mlpp/c_log_log_reg/c_log_log_reg_old.cpp",
|
||||
"mlpp/bernoulli_nb/bernoulli_nb_old.cpp",
|
||||
"mlpp/ann/ann_old.cpp",
|
||||
|
||||
"test/mlpp_tests.cpp",
|
||||
]
|
||||
|
818
mlpp/ann/ann_old.cpp
Normal file
818
mlpp/ann/ann_old.cpp
Normal file
@ -0,0 +1,818 @@
|
||||
//
|
||||
// ANN.cpp
|
||||
//
|
||||
// Created by Marc Melikyan on 11/4/20.
|
||||
//
|
||||
|
||||
#include "ann_old.h"
|
||||
#include "../activation/activation.h"
|
||||
#include "../cost/cost.h"
|
||||
#include "../lin_alg/lin_alg.h"
|
||||
#include "../regularization/reg.h"
|
||||
#include "../utilities/utilities.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
MLPPANNOld::MLPPANNOld(std::vector<std::vector<real_t>> p_inputSet, std::vector<real_t> p_outputSet) {
|
||||
inputSet = p_inputSet;
|
||||
outputSet = p_outputSet;
|
||||
|
||||
n = inputSet.size();
|
||||
k = inputSet[0].size();
|
||||
lrScheduler = "None";
|
||||
decayConstant = 0;
|
||||
dropRate = 0;
|
||||
}
|
||||
|
||||
MLPPANNOld::~MLPPANNOld() {
|
||||
delete outputLayer;
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPANNOld::modelSetTest(std::vector<std::vector<real_t>> X) {
|
||||
if (!network.empty()) {
|
||||
network[0].input = X;
|
||||
network[0].forwardPass();
|
||||
|
||||
for (uint32_t i = 1; i < network.size(); i++) {
|
||||
network[i].input = network[i - 1].a;
|
||||
network[i].forwardPass();
|
||||
}
|
||||
outputLayer->input = network[network.size() - 1].a;
|
||||
} else {
|
||||
outputLayer->input = X;
|
||||
}
|
||||
outputLayer->forwardPass();
|
||||
return outputLayer->a;
|
||||
}
|
||||
|
||||
real_t MLPPANNOld::modelTest(std::vector<real_t> x) {
|
||||
if (!network.empty()) {
|
||||
network[0].Test(x);
|
||||
for (uint32_t i = 1; i < network.size(); i++) {
|
||||
network[i].Test(network[i - 1].a_test);
|
||||
}
|
||||
outputLayer->Test(network[network.size() - 1].a_test);
|
||||
} else {
|
||||
outputLayer->Test(x);
|
||||
}
|
||||
return outputLayer->a_test;
|
||||
}
|
||||
|
||||
void MLPPANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
forwardPass();
|
||||
real_t initial_learning_rate = learning_rate;
|
||||
|
||||
alg.printMatrix(network[network.size() - 1].weights);
|
||||
while (true) {
|
||||
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
|
||||
cost_prev = Cost(y_hat, outputSet);
|
||||
|
||||
auto grads = computeGradients(y_hat, outputSet);
|
||||
auto cumulativeHiddenLayerWGrad = std::get<0>(grads);
|
||||
auto outputWGrad = std::get<1>(grads);
|
||||
|
||||
cumulativeHiddenLayerWGrad = alg.scalarMultiply(learning_rate / n, cumulativeHiddenLayerWGrad);
|
||||
outputWGrad = alg.scalarMultiply(learning_rate / n, outputWGrad);
|
||||
updateParameters(cumulativeHiddenLayerWGrad, outputWGrad, learning_rate); // subject to change. may want bias to have this matrix too.
|
||||
|
||||
std::cout << learning_rate << std::endl;
|
||||
|
||||
forwardPass();
|
||||
|
||||
if (UI) {
|
||||
MLPPANNOld::UI(epoch, cost_prev, y_hat, outputSet);
|
||||
}
|
||||
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPANNOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
real_t initial_learning_rate = learning_rate;
|
||||
|
||||
while (true) {
|
||||
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
|
||||
|
||||
std::random_device rd;
|
||||
std::default_random_engine generator(rd());
|
||||
std::uniform_int_distribution<int> distribution(0, int(n - 1));
|
||||
int outputIndex = distribution(generator);
|
||||
|
||||
std::vector<real_t> y_hat = modelSetTest({ inputSet[outputIndex] });
|
||||
cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });
|
||||
|
||||
auto grads = computeGradients(y_hat, { outputSet[outputIndex] });
|
||||
auto cumulativeHiddenLayerWGrad = std::get<0>(grads);
|
||||
auto outputWGrad = std::get<1>(grads);
|
||||
|
||||
cumulativeHiddenLayerWGrad = alg.scalarMultiply(learning_rate / n, cumulativeHiddenLayerWGrad);
|
||||
outputWGrad = alg.scalarMultiply(learning_rate / n, outputWGrad);
|
||||
|
||||
updateParameters(cumulativeHiddenLayerWGrad, outputWGrad, learning_rate); // subject to change. may want bias to have this matrix too.
|
||||
y_hat = modelSetTest({ inputSet[outputIndex] });
|
||||
|
||||
if (UI) {
|
||||
MLPPANNOld::UI(epoch, cost_prev, y_hat, { outputSet[outputIndex] });
|
||||
}
|
||||
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPANNOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
real_t initial_learning_rate = learning_rate;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
// always evaluate the result
|
||||
// always do forward pass only ONCE at end.
|
||||
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
while (true) {
|
||||
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
auto grads = computeGradients(y_hat, outputMiniBatches[i]);
|
||||
auto cumulativeHiddenLayerWGrad = std::get<0>(grads);
|
||||
auto outputWGrad = std::get<1>(grads);
|
||||
|
||||
cumulativeHiddenLayerWGrad = alg.scalarMultiply(learning_rate / n, cumulativeHiddenLayerWGrad);
|
||||
outputWGrad = alg.scalarMultiply(learning_rate / n, outputWGrad);
|
||||
|
||||
updateParameters(cumulativeHiddenLayerWGrad, outputWGrad, learning_rate); // subject to change. may want bias to have this matrix too.
|
||||
y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPANNOld::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPANNOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool NAG, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
real_t initial_learning_rate = learning_rate;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
// always evaluate the result
|
||||
// always do forward pass only ONCE at end.
|
||||
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
// Initializing necessary components for Adam.
|
||||
std::vector<std::vector<std::vector<real_t>>> v_hidden;
|
||||
|
||||
std::vector<real_t> v_output;
|
||||
while (true) {
|
||||
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
auto grads = computeGradients(y_hat, outputMiniBatches[i]);
|
||||
auto cumulativeHiddenLayerWGrad = std::get<0>(grads);
|
||||
auto outputWGrad = std::get<1>(grads);
|
||||
|
||||
if (!network.empty() && v_hidden.empty()) { // Initing our tensor
|
||||
v_hidden = alg.resize(v_hidden, cumulativeHiddenLayerWGrad);
|
||||
}
|
||||
|
||||
if (v_output.empty()) {
|
||||
v_output.resize(outputWGrad.size());
|
||||
}
|
||||
|
||||
if (NAG) { // "Aposterori" calculation
|
||||
updateParameters(v_hidden, v_output, 0); // DON'T update bias.
|
||||
}
|
||||
|
||||
v_hidden = alg.addition(alg.scalarMultiply(gamma, v_hidden), alg.scalarMultiply(learning_rate / n, cumulativeHiddenLayerWGrad));
|
||||
|
||||
v_output = alg.addition(alg.scalarMultiply(gamma, v_output), alg.scalarMultiply(learning_rate / n, outputWGrad));
|
||||
|
||||
updateParameters(v_hidden, v_output, learning_rate); // subject to change. may want bias to have this matrix too.
|
||||
y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPANNOld::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPANNOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
real_t initial_learning_rate = learning_rate;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
// always evaluate the result
|
||||
// always do forward pass only ONCE at end.
|
||||
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
// Initializing necessary components for Adam.
|
||||
std::vector<std::vector<std::vector<real_t>>> v_hidden;
|
||||
|
||||
std::vector<real_t> v_output;
|
||||
while (true) {
|
||||
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
auto grads = computeGradients(y_hat, outputMiniBatches[i]);
|
||||
auto cumulativeHiddenLayerWGrad = std::get<0>(grads);
|
||||
auto outputWGrad = std::get<1>(grads);
|
||||
|
||||
if (!network.empty() && v_hidden.empty()) { // Initing our tensor
|
||||
v_hidden = alg.resize(v_hidden, cumulativeHiddenLayerWGrad);
|
||||
}
|
||||
|
||||
if (v_output.empty()) {
|
||||
v_output.resize(outputWGrad.size());
|
||||
}
|
||||
|
||||
v_hidden = alg.addition(v_hidden, alg.exponentiate(cumulativeHiddenLayerWGrad, 2));
|
||||
|
||||
v_output = alg.addition(v_output, alg.exponentiate(outputWGrad, 2));
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(cumulativeHiddenLayerWGrad, alg.scalarAdd(e, alg.sqrt(v_hidden))));
|
||||
std::vector<real_t> outputLayerUpdation = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(outputWGrad, alg.scalarAdd(e, alg.sqrt(v_output))));
|
||||
|
||||
updateParameters(hiddenLayerUpdations, outputLayerUpdation, learning_rate); // subject to change. may want bias to have this matrix too.
|
||||
y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPANNOld::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPANNOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
real_t initial_learning_rate = learning_rate;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
// always evaluate the result
|
||||
// always do forward pass only ONCE at end.
|
||||
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
// Initializing necessary components for Adam.
|
||||
std::vector<std::vector<std::vector<real_t>>> v_hidden;
|
||||
|
||||
std::vector<real_t> v_output;
|
||||
while (true) {
|
||||
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
auto grads = computeGradients(y_hat, outputMiniBatches[i]);
|
||||
auto cumulativeHiddenLayerWGrad = std::get<0>(grads);
|
||||
auto outputWGrad = std::get<1>(grads);
|
||||
|
||||
if (!network.empty() && v_hidden.empty()) { // Initing our tensor
|
||||
v_hidden = alg.resize(v_hidden, cumulativeHiddenLayerWGrad);
|
||||
}
|
||||
|
||||
if (v_output.empty()) {
|
||||
v_output.resize(outputWGrad.size());
|
||||
}
|
||||
|
||||
v_hidden = alg.addition(alg.scalarMultiply(1 - b1, v_hidden), alg.scalarMultiply(b1, alg.exponentiate(cumulativeHiddenLayerWGrad, 2)));
|
||||
|
||||
v_output = alg.addition(v_output, alg.exponentiate(outputWGrad, 2));
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(cumulativeHiddenLayerWGrad, alg.scalarAdd(e, alg.sqrt(v_hidden))));
|
||||
std::vector<real_t> outputLayerUpdation = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(outputWGrad, alg.scalarAdd(e, alg.sqrt(v_output))));
|
||||
|
||||
updateParameters(hiddenLayerUpdations, outputLayerUpdation, learning_rate); // subject to change. may want bias to have this matrix too.
|
||||
y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPANNOld::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPANNOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
real_t initial_learning_rate = learning_rate;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
// always evaluate the result
|
||||
// always do forward pass only ONCE at end.
|
||||
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
// Initializing necessary components for Adam.
|
||||
std::vector<std::vector<std::vector<real_t>>> m_hidden;
|
||||
std::vector<std::vector<std::vector<real_t>>> v_hidden;
|
||||
|
||||
std::vector<real_t> m_output;
|
||||
std::vector<real_t> v_output;
|
||||
while (true) {
|
||||
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
auto grads = computeGradients(y_hat, outputMiniBatches[i]);
|
||||
auto cumulativeHiddenLayerWGrad = std::get<0>(grads);
|
||||
auto outputWGrad = std::get<1>(grads);
|
||||
|
||||
if (!network.empty() && m_hidden.empty() && v_hidden.empty()) { // Initing our tensor
|
||||
m_hidden = alg.resize(m_hidden, cumulativeHiddenLayerWGrad);
|
||||
v_hidden = alg.resize(v_hidden, cumulativeHiddenLayerWGrad);
|
||||
}
|
||||
|
||||
if (m_output.empty() && v_output.empty()) {
|
||||
m_output.resize(outputWGrad.size());
|
||||
v_output.resize(outputWGrad.size());
|
||||
}
|
||||
|
||||
m_hidden = alg.addition(alg.scalarMultiply(b1, m_hidden), alg.scalarMultiply(1 - b1, cumulativeHiddenLayerWGrad));
|
||||
v_hidden = alg.addition(alg.scalarMultiply(b2, v_hidden), alg.scalarMultiply(1 - b2, alg.exponentiate(cumulativeHiddenLayerWGrad, 2)));
|
||||
|
||||
m_output = alg.addition(alg.scalarMultiply(b1, m_output), alg.scalarMultiply(1 - b1, outputWGrad));
|
||||
v_output = alg.addition(alg.scalarMultiply(b2, v_output), alg.scalarMultiply(1 - b2, alg.exponentiate(outputWGrad, 2)));
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> m_hidden_hat = alg.scalarMultiply(1 / (1 - std::pow(b1, epoch)), m_hidden);
|
||||
std::vector<std::vector<std::vector<real_t>>> v_hidden_hat = alg.scalarMultiply(1 / (1 - std::pow(b2, epoch)), v_hidden);
|
||||
|
||||
std::vector<real_t> m_output_hat = alg.scalarMultiply(1 / (1 - std::pow(b1, epoch)), m_output);
|
||||
std::vector<real_t> v_output_hat = alg.scalarMultiply(1 / (1 - std::pow(b2, epoch)), v_output);
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(m_hidden_hat, alg.scalarAdd(e, alg.sqrt(v_hidden_hat))));
|
||||
std::vector<real_t> outputLayerUpdation = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(m_output_hat, alg.scalarAdd(e, alg.sqrt(v_output_hat))));
|
||||
|
||||
updateParameters(hiddenLayerUpdations, outputLayerUpdation, learning_rate); // subject to change. may want bias to have this matrix too.
|
||||
y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPANNOld::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPANNOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
real_t initial_learning_rate = learning_rate;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
// always evaluate the result
|
||||
// always do forward pass only ONCE at end.
|
||||
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
// Initializing necessary components for Adam.
|
||||
std::vector<std::vector<std::vector<real_t>>> m_hidden;
|
||||
std::vector<std::vector<std::vector<real_t>>> u_hidden;
|
||||
|
||||
std::vector<real_t> m_output;
|
||||
std::vector<real_t> u_output;
|
||||
while (true) {
|
||||
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
auto grads = computeGradients(y_hat, outputMiniBatches[i]);
|
||||
auto cumulativeHiddenLayerWGrad = std::get<0>(grads);
|
||||
auto outputWGrad = std::get<1>(grads);
|
||||
|
||||
if (!network.empty() && m_hidden.empty() && u_hidden.empty()) { // Initing our tensor
|
||||
m_hidden = alg.resize(m_hidden, cumulativeHiddenLayerWGrad);
|
||||
u_hidden = alg.resize(u_hidden, cumulativeHiddenLayerWGrad);
|
||||
}
|
||||
|
||||
if (m_output.empty() && u_output.empty()) {
|
||||
m_output.resize(outputWGrad.size());
|
||||
u_output.resize(outputWGrad.size());
|
||||
}
|
||||
|
||||
m_hidden = alg.addition(alg.scalarMultiply(b1, m_hidden), alg.scalarMultiply(1 - b1, cumulativeHiddenLayerWGrad));
|
||||
u_hidden = alg.max(alg.scalarMultiply(b2, u_hidden), alg.abs(cumulativeHiddenLayerWGrad));
|
||||
|
||||
m_output = alg.addition(alg.scalarMultiply(b1, m_output), alg.scalarMultiply(1 - b1, outputWGrad));
|
||||
u_output = alg.max(alg.scalarMultiply(b2, u_output), alg.abs(outputWGrad));
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> m_hidden_hat = alg.scalarMultiply(1 / (1 - std::pow(b1, epoch)), m_hidden);
|
||||
|
||||
std::vector<real_t> m_output_hat = alg.scalarMultiply(1 / (1 - std::pow(b1, epoch)), m_output);
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(m_hidden_hat, alg.scalarAdd(e, u_hidden)));
|
||||
std::vector<real_t> outputLayerUpdation = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(m_output_hat, alg.scalarAdd(e, u_output)));
|
||||
|
||||
updateParameters(hiddenLayerUpdations, outputLayerUpdation, learning_rate); // subject to change. may want bias to have this matrix too.
|
||||
y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPANNOld::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPANNOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
real_t initial_learning_rate = learning_rate;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
// always evaluate the result
|
||||
// always do forward pass only ONCE at end.
|
||||
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
// Initializing necessary components for Adam.
|
||||
std::vector<std::vector<std::vector<real_t>>> m_hidden;
|
||||
std::vector<std::vector<std::vector<real_t>>> v_hidden;
|
||||
|
||||
std::vector<real_t> m_output;
|
||||
std::vector<real_t> v_output;
|
||||
while (true) {
|
||||
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
auto grads = computeGradients(y_hat, outputMiniBatches[i]);
|
||||
auto cumulativeHiddenLayerWGrad = std::get<0>(grads);
|
||||
auto outputWGrad = std::get<1>(grads);
|
||||
|
||||
if (!network.empty() && m_hidden.empty() && v_hidden.empty()) { // Initing our tensor
|
||||
m_hidden = alg.resize(m_hidden, cumulativeHiddenLayerWGrad);
|
||||
v_hidden = alg.resize(v_hidden, cumulativeHiddenLayerWGrad);
|
||||
}
|
||||
|
||||
if (m_output.empty() && v_output.empty()) {
|
||||
m_output.resize(outputWGrad.size());
|
||||
v_output.resize(outputWGrad.size());
|
||||
}
|
||||
|
||||
m_hidden = alg.addition(alg.scalarMultiply(b1, m_hidden), alg.scalarMultiply(1 - b1, cumulativeHiddenLayerWGrad));
|
||||
v_hidden = alg.addition(alg.scalarMultiply(b2, v_hidden), alg.scalarMultiply(1 - b2, alg.exponentiate(cumulativeHiddenLayerWGrad, 2)));
|
||||
|
||||
m_output = alg.addition(alg.scalarMultiply(b1, m_output), alg.scalarMultiply(1 - b1, outputWGrad));
|
||||
v_output = alg.addition(alg.scalarMultiply(b2, v_output), alg.scalarMultiply(1 - b2, alg.exponentiate(outputWGrad, 2)));
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> m_hidden_hat = alg.scalarMultiply(1 / (1 - std::pow(b1, epoch)), m_hidden);
|
||||
std::vector<std::vector<std::vector<real_t>>> v_hidden_hat = alg.scalarMultiply(1 / (1 - std::pow(b2, epoch)), v_hidden);
|
||||
std::vector<std::vector<std::vector<real_t>>> m_hidden_final = alg.addition(alg.scalarMultiply(b1, m_hidden_hat), alg.scalarMultiply((1 - b1) / (1 - std::pow(b1, epoch)), cumulativeHiddenLayerWGrad));
|
||||
|
||||
std::vector<real_t> m_output_hat = alg.scalarMultiply(1 / (1 - std::pow(b1, epoch)), m_output);
|
||||
std::vector<real_t> v_output_hat = alg.scalarMultiply(1 / (1 - std::pow(b2, epoch)), v_output);
|
||||
std::vector<real_t> m_output_final = alg.addition(alg.scalarMultiply(b1, m_output_hat), alg.scalarMultiply((1 - b1) / (1 - std::pow(b1, epoch)), outputWGrad));
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(m_hidden_final, alg.scalarAdd(e, alg.sqrt(v_hidden_hat))));
|
||||
std::vector<real_t> outputLayerUpdation = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(m_output_final, alg.scalarAdd(e, alg.sqrt(v_output_hat))));
|
||||
|
||||
updateParameters(hiddenLayerUpdations, outputLayerUpdation, learning_rate); // subject to change. may want bias to have this matrix too.
|
||||
y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPANNOld::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPANNOld::AMSGrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
real_t initial_learning_rate = learning_rate;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
// always evaluate the result
|
||||
// always do forward pass only ONCE at end.
|
||||
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
// Initializing necessary components for Adam.
|
||||
std::vector<std::vector<std::vector<real_t>>> m_hidden;
|
||||
std::vector<std::vector<std::vector<real_t>>> v_hidden;
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> v_hidden_hat;
|
||||
|
||||
std::vector<real_t> m_output;
|
||||
std::vector<real_t> v_output;
|
||||
|
||||
std::vector<real_t> v_output_hat;
|
||||
while (true) {
|
||||
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
auto grads = computeGradients(y_hat, outputMiniBatches[i]);
|
||||
auto cumulativeHiddenLayerWGrad = std::get<0>(grads);
|
||||
auto outputWGrad = std::get<1>(grads);
|
||||
|
||||
if (!network.empty() && m_hidden.empty() && v_hidden.empty()) { // Initing our tensor
|
||||
m_hidden = alg.resize(m_hidden, cumulativeHiddenLayerWGrad);
|
||||
v_hidden = alg.resize(v_hidden, cumulativeHiddenLayerWGrad);
|
||||
v_hidden_hat = alg.resize(v_hidden_hat, cumulativeHiddenLayerWGrad);
|
||||
}
|
||||
|
||||
if (m_output.empty() && v_output.empty()) {
|
||||
m_output.resize(outputWGrad.size());
|
||||
v_output.resize(outputWGrad.size());
|
||||
v_output_hat.resize(outputWGrad.size());
|
||||
}
|
||||
|
||||
m_hidden = alg.addition(alg.scalarMultiply(b1, m_hidden), alg.scalarMultiply(1 - b1, cumulativeHiddenLayerWGrad));
|
||||
v_hidden = alg.addition(alg.scalarMultiply(b2, v_hidden), alg.scalarMultiply(1 - b2, alg.exponentiate(cumulativeHiddenLayerWGrad, 2)));
|
||||
|
||||
m_output = alg.addition(alg.scalarMultiply(b1, m_output), alg.scalarMultiply(1 - b1, outputWGrad));
|
||||
v_output = alg.addition(alg.scalarMultiply(b2, v_output), alg.scalarMultiply(1 - b2, alg.exponentiate(outputWGrad, 2)));
|
||||
|
||||
v_hidden_hat = alg.max(v_hidden_hat, v_hidden);
|
||||
|
||||
v_output_hat = alg.max(v_output_hat, v_output);
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(m_hidden, alg.scalarAdd(e, alg.sqrt(v_hidden_hat))));
|
||||
std::vector<real_t> outputLayerUpdation = alg.scalarMultiply(learning_rate / n, alg.elementWiseDivision(m_output, alg.scalarAdd(e, alg.sqrt(v_output_hat))));
|
||||
|
||||
updateParameters(hiddenLayerUpdations, outputLayerUpdation, learning_rate); // subject to change. may want bias to have this matrix too.
|
||||
y_hat = modelSetTest(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPANNOld::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
real_t MLPPANNOld::score() {
|
||||
MLPPUtilities util;
|
||||
forwardPass();
|
||||
return util.performance(y_hat, outputSet);
|
||||
}
|
||||
|
||||
void MLPPANNOld::save(std::string fileName) {
|
||||
MLPPUtilities util;
|
||||
if (!network.empty()) {
|
||||
util.saveParameters(fileName, network[0].weights, network[0].bias, false, 1);
|
||||
for (uint32_t i = 1; i < network.size(); i++) {
|
||||
util.saveParameters(fileName, network[i].weights, network[i].bias, true, i + 1);
|
||||
}
|
||||
util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, true, network.size() + 1);
|
||||
} else {
|
||||
util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, false, network.size() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPANNOld::setLearningRateScheduler(std::string type, real_t decayConstant) {
|
||||
lrScheduler = type;
|
||||
MLPPANNOld::decayConstant = decayConstant;
|
||||
}
|
||||
|
||||
void MLPPANNOld::setLearningRateScheduler(std::string type, real_t decayConstant, real_t dropRate) {
|
||||
lrScheduler = type;
|
||||
MLPPANNOld::decayConstant = decayConstant;
|
||||
MLPPANNOld::dropRate = dropRate;
|
||||
}
|
||||
|
||||
// https://en.wikipedia.org/wiki/Learning_rate
|
||||
// Learning Rate Decay (C2W2L09) - Andrew Ng - Deep Learning Specialization
|
||||
real_t MLPPANNOld::applyLearningRateScheduler(real_t learningRate, real_t decayConstant, real_t epoch, real_t dropRate) {
|
||||
if (lrScheduler == "Time") {
|
||||
return learningRate / (1 + decayConstant * epoch);
|
||||
} else if (lrScheduler == "Epoch") {
|
||||
return learningRate * (decayConstant / std::sqrt(epoch));
|
||||
} else if (lrScheduler == "Step") {
|
||||
return learningRate * std::pow(decayConstant, int((1 + epoch) / dropRate)); // Utilizing an explicit int conversion implicitly takes the floor.
|
||||
} else if (lrScheduler == "Exponential") {
|
||||
return learningRate * std::exp(-decayConstant * epoch);
|
||||
}
|
||||
return learningRate;
|
||||
}
|
||||
|
||||
void MLPPANNOld::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
|
||||
if (network.empty()) {
|
||||
network.push_back(MLPPOldHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
|
||||
network[0].forwardPass();
|
||||
} else {
|
||||
network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
|
||||
network[network.size() - 1].forwardPass();
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPANNOld::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
|
||||
if (!network.empty()) {
|
||||
outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
|
||||
} else {
|
||||
outputLayer = new MLPPOldOutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
|
||||
}
|
||||
}
|
||||
|
||||
real_t MLPPANNOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
|
||||
MLPPReg regularization;
|
||||
class MLPPCost cost;
|
||||
real_t totalRegTerm = 0;
|
||||
|
||||
auto cost_function = outputLayer->cost_map[outputLayer->cost];
|
||||
if (!network.empty()) {
|
||||
for (uint32_t i = 0; i < network.size() - 1; i++) {
|
||||
totalRegTerm += regularization.regTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg);
|
||||
}
|
||||
}
|
||||
return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
|
||||
}
|
||||
|
||||
void MLPPANNOld::forwardPass() {
|
||||
if (!network.empty()) {
|
||||
network[0].input = inputSet;
|
||||
network[0].forwardPass();
|
||||
|
||||
for (uint32_t i = 1; i < network.size(); i++) {
|
||||
network[i].input = network[i - 1].a;
|
||||
network[i].forwardPass();
|
||||
}
|
||||
outputLayer->input = network[network.size() - 1].a;
|
||||
} else {
|
||||
outputLayer->input = inputSet;
|
||||
}
|
||||
outputLayer->forwardPass();
|
||||
y_hat = outputLayer->a;
|
||||
}
|
||||
|
||||
void MLPPANNOld::updateParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate) {
|
||||
MLPPLinAlg alg;
|
||||
|
||||
outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
|
||||
outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
|
||||
|
||||
if (!network.empty()) {
|
||||
network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, hiddenLayerUpdations[0]);
|
||||
network[network.size() - 1].bias = alg.subtractMatrixRows(network[network.size() - 1].bias, alg.scalarMultiply(learning_rate / n, network[network.size() - 1].delta));
|
||||
|
||||
for (int i = network.size() - 2; i >= 0; i--) {
|
||||
network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
|
||||
network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate / n, network[i].delta));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPANNOld::computeGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
|
||||
// std::cout << "BEGIN" << std::endl;
|
||||
class MLPPCost cost;
|
||||
MLPPActivation avn;
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
|
||||
|
||||
auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
|
||||
auto outputAvn = outputLayer->activation_map[outputLayer->activation];
|
||||
outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
|
||||
std::vector<real_t> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
|
||||
outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
|
||||
|
||||
if (!network.empty()) {
|
||||
auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
|
||||
network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
|
||||
std::vector<std::vector<real_t>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
|
||||
|
||||
cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
|
||||
|
||||
for (int i = network.size() - 2; i >= 0; i--) {
|
||||
hiddenLayerAvn = network[i].activation_map[network[i].activation];
|
||||
network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvn)(network[i].z, 1));
|
||||
hiddenLayerWGrad = alg.matmult(alg.transpose(network[i].input), network[i].delta);
|
||||
cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
|
||||
}
|
||||
}
|
||||
return { cumulativeHiddenLayerWGrad, outputWGrad };
|
||||
}
|
||||
|
||||
void MLPPANNOld::UI(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
|
||||
std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
|
||||
MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);
|
||||
if (!network.empty()) {
|
||||
for (int i = network.size() - 1; i >= 0; i--) {
|
||||
std::cout << "Layer " << i + 1 << ": " << std::endl;
|
||||
MLPPUtilities::UI(network[i].weights, network[i].bias);
|
||||
}
|
||||
}
|
||||
}
|
73
mlpp/ann/ann_old.h
Normal file
73
mlpp/ann/ann_old.h
Normal file
@ -0,0 +1,73 @@
|
||||
#ifndef MLPP_ANN_OLD_H
|
||||
#define MLPP_ANN_OLD_H
|
||||
|
||||
//
|
||||
// ANN.hpp
|
||||
//
|
||||
// Created by Marc Melikyan on 11/4/20.
|
||||
//
|
||||
|
||||
#include "core/math/math_defs.h"
|
||||
|
||||
#include "../hidden_layer/hidden_layer.h"
|
||||
#include "../output_layer/output_layer.h"
|
||||
|
||||
#include "../hidden_layer/hidden_layer_old.h"
|
||||
#include "../output_layer/output_layer_old.h"
|
||||
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
|
||||
class MLPPANNOld {
|
||||
public:
|
||||
MLPPANNOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet);
|
||||
~MLPPANNOld();
|
||||
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
|
||||
real_t modelTest(std::vector<real_t> x);
|
||||
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
|
||||
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
|
||||
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
|
||||
void Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool NAG, bool UI = false);
|
||||
void Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI = false);
|
||||
void Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI = false);
|
||||
void Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
|
||||
void Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
|
||||
void Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
|
||||
void AMSGrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
|
||||
real_t score();
|
||||
void save(std::string fileName);
|
||||
|
||||
void setLearningRateScheduler(std::string type, real_t decayConstant);
|
||||
void setLearningRateScheduler(std::string type, real_t decayConstant, real_t dropRate);
|
||||
|
||||
void addLayer(int n_hidden, std::string activation, std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
|
||||
void addOutputLayer(std::string activation, std::string loss, std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
|
||||
|
||||
private:
|
||||
real_t applyLearningRateScheduler(real_t learningRate, real_t decayConstant, real_t epoch, real_t dropRate);
|
||||
|
||||
real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);
|
||||
|
||||
void forwardPass();
|
||||
void updateParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate);
|
||||
std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> computeGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet);
|
||||
|
||||
void UI(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> outputSet);
|
||||
|
||||
std::vector<std::vector<real_t>> inputSet;
|
||||
std::vector<real_t> outputSet;
|
||||
std::vector<real_t> y_hat;
|
||||
|
||||
std::vector<MLPPOldHiddenLayer> network;
|
||||
MLPPOldOutputLayer *outputLayer;
|
||||
|
||||
int n;
|
||||
int k;
|
||||
|
||||
std::string lrScheduler;
|
||||
real_t decayConstant;
|
||||
real_t dropRate;
|
||||
};
|
||||
|
||||
#endif /* ANN_hpp */
|
179
mlpp/bernoulli_nb/bernoulli_nb_old.cpp
Normal file
179
mlpp/bernoulli_nb/bernoulli_nb_old.cpp
Normal file
@ -0,0 +1,179 @@
|
||||
//
|
||||
// BernoulliNB.cpp
|
||||
//
|
||||
// Created by Marc Melikyan on 1/17/21.
|
||||
//
|
||||
|
||||
#include "bernoulli_nb_old.h"
|
||||
#include "../data/data.h"
|
||||
#include "../lin_alg/lin_alg.h"
|
||||
#include "../utilities/utilities.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
MLPPBernoulliNBOld::MLPPBernoulliNBOld(std::vector<std::vector<real_t>> p_inputSet, std::vector<real_t> p_outputSet) {
|
||||
inputSet = p_inputSet;
|
||||
outputSet = p_outputSet;
|
||||
class_num = 2;
|
||||
|
||||
y_hat.resize(outputSet.size());
|
||||
Evaluate();
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPBernoulliNBOld::modelSetTest(std::vector<std::vector<real_t>> X) {
|
||||
std::vector<real_t> y_hat;
|
||||
for (uint32_t i = 0; i < X.size(); i++) {
|
||||
y_hat.push_back(modelTest(X[i]));
|
||||
}
|
||||
return y_hat;
|
||||
}
|
||||
|
||||
real_t MLPPBernoulliNBOld::modelTest(std::vector<real_t> x) {
|
||||
real_t score_0 = 1;
|
||||
real_t score_1 = 1;
|
||||
|
||||
std::vector<int> foundIndices;
|
||||
|
||||
for (uint32_t j = 0; j < x.size(); j++) {
|
||||
for (uint32_t k = 0; k < vocab.size(); k++) {
|
||||
if (x[j] == vocab[k]) {
|
||||
score_0 *= theta[0][vocab[k]];
|
||||
score_1 *= theta[1][vocab[k]];
|
||||
|
||||
foundIndices.push_back(k);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < vocab.size(); i++) {
|
||||
bool found = false;
|
||||
for (uint32_t j = 0; j < foundIndices.size(); j++) {
|
||||
if (vocab[i] == vocab[foundIndices[j]]) {
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
score_0 *= 1 - theta[0][vocab[i]];
|
||||
score_1 *= 1 - theta[1][vocab[i]];
|
||||
}
|
||||
}
|
||||
|
||||
score_0 *= prior_0;
|
||||
score_1 *= prior_1;
|
||||
|
||||
// Assigning the traning example to a class
|
||||
|
||||
if (score_0 > score_1) {
|
||||
return 0;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
real_t MLPPBernoulliNBOld::score() {
|
||||
MLPPUtilities util;
|
||||
return util.performance(y_hat, outputSet);
|
||||
}
|
||||
|
||||
void MLPPBernoulliNBOld::computeVocab() {
|
||||
MLPPLinAlg alg;
|
||||
MLPPData data;
|
||||
vocab = data.vecToSet<real_t>(alg.flatten(inputSet));
|
||||
}
|
||||
|
||||
void MLPPBernoulliNBOld::computeTheta() {
|
||||
// Resizing theta for the sake of ease & proper access of the elements.
|
||||
theta.resize(class_num);
|
||||
|
||||
// Setting all values in the hasmap by default to 0.
|
||||
for (int i = class_num - 1; i >= 0; i--) {
|
||||
for (uint32_t j = 0; j < vocab.size(); j++) {
|
||||
theta[i][vocab[j]] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < inputSet.size(); i++) {
|
||||
for (uint32_t j = 0; j < inputSet[0].size(); j++) {
|
||||
theta[outputSet[i]][inputSet[i][j]]++;
|
||||
}
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < theta.size(); i++) {
|
||||
for (uint32_t j = 0; j < theta[i].size(); j++) {
|
||||
if (i == 0) {
|
||||
theta[i][j] /= prior_0 * y_hat.size();
|
||||
} else {
|
||||
theta[i][j] /= prior_1 * y_hat.size();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPBernoulliNBOld::Evaluate() {
|
||||
for (uint32_t i = 0; i < outputSet.size(); i++) {
|
||||
// Pr(B | A) * Pr(A)
|
||||
real_t score_0 = 1;
|
||||
real_t score_1 = 1;
|
||||
|
||||
real_t sum = 0;
|
||||
for (uint32_t ii = 0; ii < outputSet.size(); ii++) {
|
||||
if (outputSet[ii] == 1) {
|
||||
sum += outputSet[ii];
|
||||
}
|
||||
}
|
||||
|
||||
// Easy computation of priors, i.e. Pr(C_k)
|
||||
prior_1 = sum / y_hat.size();
|
||||
prior_0 = 1 - prior_1;
|
||||
|
||||
// Evaluating Theta...
|
||||
computeTheta();
|
||||
|
||||
// Evaluating the vocab set...
|
||||
computeVocab();
|
||||
|
||||
std::vector<int> foundIndices;
|
||||
|
||||
for (uint32_t j = 0; j < inputSet.size(); j++) {
|
||||
for (uint32_t k = 0; k < vocab.size(); k++) {
|
||||
if (inputSet[i][j] == vocab[k]) {
|
||||
score_0 += std::log(theta[0][vocab[k]]);
|
||||
score_1 += std::log(theta[1][vocab[k]]);
|
||||
|
||||
foundIndices.push_back(k);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (uint32_t ii = 0; ii < vocab.size(); ii++) {
|
||||
bool found = false;
|
||||
for (uint32_t j = 0; j < foundIndices.size(); j++) {
|
||||
if (vocab[ii] == vocab[foundIndices[j]]) {
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
score_0 += std::log(1 - theta[0][vocab[ii]]);
|
||||
score_1 += std::log(1 - theta[1][vocab[ii]]);
|
||||
}
|
||||
}
|
||||
|
||||
score_0 += std::log(prior_0);
|
||||
score_1 += std::log(prior_1);
|
||||
|
||||
score_0 = exp(score_0);
|
||||
score_1 = exp(score_1);
|
||||
|
||||
std::cout << score_0 << std::endl;
|
||||
std::cout << score_1 << std::endl;
|
||||
|
||||
// Assigning the traning example to a class
|
||||
|
||||
if (score_0 > score_1) {
|
||||
y_hat[i] = 0;
|
||||
} else {
|
||||
y_hat[i] = 1;
|
||||
}
|
||||
}
|
||||
}
|
42
mlpp/bernoulli_nb/bernoulli_nb_old.h
Normal file
42
mlpp/bernoulli_nb/bernoulli_nb_old.h
Normal file
@ -0,0 +1,42 @@
|
||||
|
||||
#ifndef MLPP_BERNOULLI_NB_OLD_H
|
||||
#define MLPP_BERNOULLI_NB_OLD_H
|
||||
|
||||
//
|
||||
// BernoulliNB.hpp
|
||||
//
|
||||
// Created by Marc Melikyan on 1/17/21.
|
||||
//
|
||||
|
||||
#include "core/math/math_defs.h"
|
||||
|
||||
#include <map>
|
||||
#include <vector>
|
||||
|
||||
class MLPPBernoulliNBOld {
|
||||
public:
|
||||
MLPPBernoulliNBOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet);
|
||||
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
|
||||
real_t modelTest(std::vector<real_t> x);
|
||||
real_t score();
|
||||
|
||||
private:
|
||||
void computeVocab();
|
||||
void computeTheta();
|
||||
void Evaluate();
|
||||
|
||||
// Model Params
|
||||
real_t prior_1 = 0;
|
||||
real_t prior_0 = 0;
|
||||
|
||||
std::vector<std::map<real_t, int>> theta;
|
||||
std::vector<real_t> vocab;
|
||||
int class_num;
|
||||
|
||||
// Datasets
|
||||
std::vector<std::vector<real_t>> inputSet;
|
||||
std::vector<real_t> outputSet;
|
||||
std::vector<real_t> y_hat;
|
||||
};
|
||||
|
||||
#endif /* BernoulliNB_hpp */
|
224
mlpp/c_log_log_reg/c_log_log_reg_old.cpp
Normal file
224
mlpp/c_log_log_reg/c_log_log_reg_old.cpp
Normal file
@ -0,0 +1,224 @@
|
||||
//
|
||||
// CLogLogReg.cpp
|
||||
//
|
||||
// Created by Marc Melikyan on 10/2/20.
|
||||
//
|
||||
|
||||
#include "c_log_log_reg_old.h"
|
||||
|
||||
#include "../activation/activation.h"
|
||||
#include "../cost/cost.h"
|
||||
#include "../lin_alg/lin_alg.h"
|
||||
#include "../regularization/reg.h"
|
||||
#include "../utilities/utilities.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
MLPPCLogLogRegOld::MLPPCLogLogRegOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg, real_t lambda, real_t alpha) :
|
||||
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
|
||||
y_hat.resize(n);
|
||||
weights = MLPPUtilities::weightInitialization(k);
|
||||
bias = MLPPUtilities::biasInitialization();
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPCLogLogRegOld::modelSetTest(std::vector<std::vector<real_t>> X) {
|
||||
return Evaluate(X);
|
||||
}
|
||||
|
||||
real_t MLPPCLogLogRegOld::modelTest(std::vector<real_t> x) {
|
||||
return Evaluate(x);
|
||||
}
|
||||
|
||||
void MLPPCLogLogRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
|
||||
MLPPActivation avn;
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
forwardPass();
|
||||
|
||||
while (true) {
|
||||
cost_prev = Cost(y_hat, outputSet);
|
||||
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
|
||||
|
||||
// Calculating the weight gradients
|
||||
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputSet), alg.hadamard_product(error, avn.cloglog(z, 1)))));
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Calculating the bias gradients
|
||||
bias -= learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / n;
|
||||
|
||||
forwardPass();
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
epoch++;
|
||||
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPCLogLogRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {
|
||||
MLPPActivation avn;
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
forwardPass();
|
||||
|
||||
while (true) {
|
||||
cost_prev = Cost(y_hat, outputSet);
|
||||
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
|
||||
|
||||
weights = alg.addition(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputSet), alg.hadamard_product(error, avn.cloglog(z, 1)))));
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Calculating the bias gradients
|
||||
bias += learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / n;
|
||||
forwardPass();
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
epoch++;
|
||||
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPCLogLogRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
forwardPass();
|
||||
|
||||
while (true) {
|
||||
std::random_device rd;
|
||||
std::default_random_engine generator(rd());
|
||||
std::uniform_int_distribution<int> distribution(0, int(n - 1));
|
||||
int outputIndex = distribution(generator);
|
||||
|
||||
real_t y_hat = Evaluate(inputSet[outputIndex]);
|
||||
real_t z = propagate(inputSet[outputIndex]);
|
||||
cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });
|
||||
|
||||
real_t error = y_hat - outputSet[outputIndex];
|
||||
|
||||
// Weight Updation
|
||||
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate * error * exp(z - exp(z)), inputSet[outputIndex]));
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Bias updation
|
||||
bias -= learning_rate * error * exp(z - exp(z));
|
||||
|
||||
y_hat = Evaluate({ inputSet[outputIndex] });
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
epoch++;
|
||||
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPCLogLogRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
|
||||
MLPPActivation avn;
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
while (true) {
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
|
||||
std::vector<real_t> z = propagate(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
|
||||
|
||||
// Calculating the weight gradients
|
||||
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), alg.hadamard_product(error, avn.cloglog(z, 1)))));
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Calculating the bias gradients
|
||||
bias -= learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / n;
|
||||
|
||||
forwardPass();
|
||||
|
||||
y_hat = Evaluate(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
real_t MLPPCLogLogRegOld::score() {
|
||||
MLPPUtilities util;
|
||||
return util.performance(y_hat, outputSet);
|
||||
}
|
||||
|
||||
real_t MLPPCLogLogRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
|
||||
MLPPReg regularization;
|
||||
class MLPPCost cost;
|
||||
return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPCLogLogRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPActivation avn;
|
||||
return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPCLogLogRegOld::propagate(std::vector<std::vector<real_t>> X) {
|
||||
MLPPLinAlg alg;
|
||||
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
|
||||
}
|
||||
|
||||
real_t MLPPCLogLogRegOld::Evaluate(std::vector<real_t> x) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPActivation avn;
|
||||
return avn.cloglog(alg.dot(weights, x) + bias);
|
||||
}
|
||||
|
||||
real_t MLPPCLogLogRegOld::propagate(std::vector<real_t> x) {
|
||||
MLPPLinAlg alg;
|
||||
return alg.dot(weights, x) + bias;
|
||||
}
|
||||
|
||||
// cloglog ( wTx + b )
|
||||
void MLPPCLogLogRegOld::forwardPass() {
|
||||
MLPPActivation avn;
|
||||
|
||||
z = propagate(inputSet);
|
||||
y_hat = avn.cloglog(z);
|
||||
}
|
54
mlpp/c_log_log_reg/c_log_log_reg_old.h
Normal file
54
mlpp/c_log_log_reg/c_log_log_reg_old.h
Normal file
@ -0,0 +1,54 @@

#ifndef MLPP_C_LOG_LOG_REG_H
#define MLPP_C_LOG_LOG_REG_H

//
// CLogLogReg.hpp
//
// Created by Marc Melikyan on 10/2/20.
//

#include "core/math/math_defs.h"

#include <string>
#include <vector>

class MLPPCLogLogRegOld {
public:
MLPPCLogLogRegOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
real_t modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void MLE(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();

private:
void weightInitialization(int k);
void biasInitialization();
real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);

std::vector<real_t> Evaluate(std::vector<std::vector<real_t>> X);
std::vector<real_t> propagate(std::vector<std::vector<real_t>> X);
real_t Evaluate(std::vector<real_t> x);
real_t propagate(std::vector<real_t> x);
void forwardPass();

std::vector<std::vector<real_t>> inputSet;
std::vector<real_t> outputSet;
std::vector<real_t> y_hat;
std::vector<real_t> z;
std::vector<real_t> weights;
real_t bias;

int n;
int k;

// Regularization Params
std::string reg;
real_t lambda;
real_t alpha; /* This is the controlling param for Elastic Net*/
};

#endif /* CLogLogReg_hpp */
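For orientation, a minimal usage sketch of the API declared above. The toy dataset, learning rate, and demo function name are illustrative assumptions, and the snippet assumes it is compiled inside the module so that real_t and the include path resolve:

#include "mlpp/c_log_log_reg/c_log_log_reg_old.h"

#include <iostream>
#include <vector>

void c_log_log_reg_old_demo() {
	// Toy binary dataset: two features per sample (illustrative values only).
	std::vector<std::vector<real_t>> X = { { 0.1, 0.2 }, { 0.3, 0.1 }, { 2.5, 2.0 }, { 3.0, 2.8 } };
	std::vector<real_t> y = { 0, 0, 1, 1 };

	// Ridge regularization; lambda/alpha keep the defaults from the header.
	MLPPCLogLogRegOld model(X, y, "Ridge", 0.5, 0.5);

	// Full-batch gradient descent; UI = false suppresses per-epoch printing.
	model.gradientDescent(0.01, 1000, false);

	std::cout << "Accuracy: " << model.score() << std::endl;
	std::cout << "p(y = 1 | x): " << model.modelTest({ 2.7, 2.4 }) << std::endl;
}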
246
mlpp/dual_svc/dual_svc_old.cpp
Normal file
@ -0,0 +1,246 @@
|
||||
//
|
||||
// DualSVC.cpp
|
||||
//
|
||||
// Created by Marc Melikyan on 10/2/20.
|
||||
//
|
||||
|
||||
#include "dual_svc_old.h"
|
||||
#include "../activation/activation.h"
|
||||
#include "../cost/cost.h"
|
||||
#include "../lin_alg/lin_alg.h"
|
||||
#include "../regularization/reg.h"
|
||||
#include "../utilities/utilities.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
MLPPDualSVCOld::MLPPDualSVCOld(std::vector<std::vector<real_t>> p_inputSet, std::vector<real_t> p_outputSet, real_t p_C, std::string p_kernel) {
|
||||
inputSet = p_inputSet;
|
||||
outputSet = p_outputSet;
|
||||
n = p_inputSet.size();
|
||||
k = p_inputSet[0].size();
|
||||
C = p_C;
|
||||
kernel = p_kernel;
|
||||
|
||||
y_hat.resize(n);
|
||||
bias = MLPPUtilities::biasInitialization();
|
||||
alpha = MLPPUtilities::weightInitialization(n); // One alpha for each training example, as per the Lagrangian multipliers.
|
||||
K = kernelFunction(inputSet, inputSet, kernel); // For now this is unused. When non-linear kernels are added, the K will be manipulated.
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPDualSVCOld::modelSetTest(std::vector<std::vector<real_t>> X) {
|
||||
return Evaluate(X);
|
||||
}
|
||||
|
||||
real_t MLPPDualSVCOld::modelTest(std::vector<real_t> x) {
|
||||
return Evaluate(x);
|
||||
}
|
||||
|
||||
void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPActivation avn;
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
forwardPass();
|
||||
|
||||
while (true) {
|
||||
cost_prev = Cost(alpha, inputSet, outputSet);
|
||||
|
||||
alpha = alg.subtraction(alpha, alg.scalarMultiply(learning_rate, cost.dualFormSVMDeriv(alpha, inputSet, outputSet)));
|
||||
|
||||
alphaProjection();
|
||||
|
||||
// Calculating the bias
|
||||
real_t biasGradient = 0;
|
||||
for (uint32_t i = 0; i < alpha.size(); i++) {
|
||||
real_t sum = 0;
|
||||
if (alpha[i] < C && alpha[i] > 0) {
|
||||
for (uint32_t j = 0; j < alpha.size(); j++) {
|
||||
if (alpha[j] > 0) {
|
||||
sum += alpha[j] * outputSet[j] * alg.dot(inputSet[j], inputSet[i]); // TO DO: DON'T forget to add non-linear kernelizations.
|
||||
}
|
||||
}
|
||||
}
|
||||
biasGradient = (1 - outputSet[i] * sum) / outputSet[i];
|
||||
break;
|
||||
}
|
||||
bias -= biasGradient * learning_rate;
|
||||
|
||||
forwardPass();
|
||||
|
||||
// UI PORTION
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(alpha, inputSet, outputSet));
|
||||
MLPPUtilities::UI(alpha, bias);
|
||||
std::cout << score() << std::endl; // TO DO: DELETE THIS.
|
||||
}
|
||||
epoch++;
|
||||
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// void MLPPDualSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI){
|
||||
// class MLPPCost cost;
|
||||
// MLPPActivation avn;
|
||||
// MLPPLinAlg alg;
|
||||
// MLPPReg regularization;
|
||||
|
||||
// real_t cost_prev = 0;
|
||||
// int epoch = 1;
|
||||
|
||||
// while(true){
|
||||
// std::random_device rd;
|
||||
// std::default_random_engine generator(rd());
|
||||
// std::uniform_int_distribution<int> distribution(0, int(n - 1));
|
||||
// int outputIndex = distribution(generator);
|
||||
|
||||
// cost_prev = Cost(alpha, inputSet[outputIndex], outputSet[outputIndex]);
|
||||
|
||||
// // Bias updation
|
||||
// bias -= learning_rate * costDeriv;
|
||||
|
||||
// y_hat = Evaluate({inputSet[outputIndex]});
|
||||
|
||||
// if(UI) {
|
||||
// MLPPUtilities::CostInfo(epoch, cost_prev, Cost(alpha));
|
||||
// MLPPUtilities::UI(weights, bias);
|
||||
// }
|
||||
// epoch++;
|
||||
|
||||
// if(epoch > max_epoch) { break; }
|
||||
// }
|
||||
// forwardPass();
|
||||
// }
|
||||
|
||||
// void MLPPDualSVCOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI){
|
||||
// class MLPPCost cost;
|
||||
// MLPPActivation avn;
|
||||
// MLPPLinAlg alg;
|
||||
// MLPPReg regularization;
|
||||
// real_t cost_prev = 0;
|
||||
// int epoch = 1;
|
||||
|
||||
// // Creating the mini-batches
|
||||
// int n_mini_batch = n/mini_batch_size;
|
||||
// auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
|
||||
// while(true){
|
||||
// for(int i = 0; i < n_mini_batch; i++){
|
||||
// std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
|
||||
// std::vector<real_t> z = propagate(inputMiniBatches[i]);
|
||||
// cost_prev = Cost(z, outputMiniBatches[i], weights, C);
|
||||
|
||||
// // Calculating the weight gradients
|
||||
// weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate/n, alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), cost.HingeLossDeriv(z, outputMiniBatches[i], C))));
|
||||
// weights = regularization.regWeights(weights, learning_rate/n, 0, "Ridge");
|
||||
|
||||
// // Calculating the bias gradients
|
||||
// bias -= learning_rate * alg.sum_elements(cost.HingeLossDeriv(y_hat, outputMiniBatches[i], C)) / n;
|
||||
|
||||
// forwardPass();
|
||||
|
||||
// y_hat = Evaluate(inputMiniBatches[i]);
|
||||
|
||||
// if(UI) {
|
||||
// MLPPUtilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C));
|
||||
// MLPPUtilities::UI(weights, bias);
|
||||
// }
|
||||
// }
|
||||
// epoch++;
|
||||
// if(epoch > max_epoch) { break; }
|
||||
// }
|
||||
// forwardPass();
|
||||
// }
|
||||
|
||||
real_t MLPPDualSVCOld::score() {
|
||||
MLPPUtilities util;
|
||||
return util.performance(y_hat, outputSet);
|
||||
}
|
||||
|
||||
void MLPPDualSVCOld::save(std::string fileName) {
|
||||
MLPPUtilities util;
|
||||
util.saveParameters(fileName, alpha, bias);
|
||||
}
|
||||
|
||||
real_t MLPPDualSVCOld::Cost(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y) {
|
||||
class MLPPCost cost;
|
||||
return cost.dualFormSVM(alpha, X, y);
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPDualSVCOld::Evaluate(std::vector<std::vector<real_t>> X) {
|
||||
MLPPActivation avn;
|
||||
return avn.sign(propagate(X));
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPDualSVCOld::propagate(std::vector<std::vector<real_t>> X) {
|
||||
MLPPLinAlg alg;
|
||||
std::vector<real_t> z;
|
||||
for (uint32_t i = 0; i < X.size(); i++) {
|
||||
real_t sum = 0;
|
||||
for (uint32_t j = 0; j < alpha.size(); j++) {
|
||||
if (alpha[j] != 0) {
|
||||
sum += alpha[j] * outputSet[j] * alg.dot(inputSet[j], X[i]); // TO DO: DON'T forget to add non-linear kernelizations.
|
||||
}
|
||||
}
|
||||
sum += bias;
|
||||
z.push_back(sum);
|
||||
}
|
||||
return z;
|
||||
}
|
||||
|
||||
real_t MLPPDualSVCOld::Evaluate(std::vector<real_t> x) {
|
||||
MLPPActivation avn;
|
||||
return avn.sign(propagate(x));
|
||||
}
|
||||
|
||||
real_t MLPPDualSVCOld::propagate(std::vector<real_t> x) {
|
||||
MLPPLinAlg alg;
|
||||
real_t z = 0;
|
||||
for (uint32_t j = 0; j < alpha.size(); j++) {
|
||||
if (alpha[j] != 0) {
|
||||
z += alpha[j] * outputSet[j] * alg.dot(inputSet[j], x); // TO DO: DON'T forget to add non-linear kernelizations.
|
||||
}
|
||||
}
|
||||
z += bias;
|
||||
return z;
|
||||
}
|
||||
|
||||
void MLPPDualSVCOld::forwardPass() {
|
||||
MLPPActivation avn;
|
||||
|
||||
z = propagate(inputSet);
|
||||
y_hat = avn.sign(z);
|
||||
}
|
||||
|
||||
void MLPPDualSVCOld::alphaProjection() {
|
||||
for (uint32_t i = 0; i < alpha.size(); i++) {
|
||||
if (alpha[i] > C) {
|
||||
alpha[i] = C;
|
||||
} else if (alpha[i] < 0) {
|
||||
alpha[i] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
real_t MLPPDualSVCOld::kernelFunction(std::vector<real_t> u, std::vector<real_t> v, std::string kernel) {
|
||||
MLPPLinAlg alg;
|
||||
if (kernel == "Linear") {
|
||||
return alg.dot(u, v);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
std::vector<std::vector<real_t>> MLPPDualSVCOld::kernelFunction(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B, std::string kernel) {
|
||||
MLPPLinAlg alg;
|
||||
if (kernel == "Linear") {
|
||||
return alg.matmult(inputSet, alg.transpose(inputSet));
|
||||
}
|
||||
|
||||
return std::vector<std::vector<real_t>>();
|
||||
}
|
69
mlpp/dual_svc/dual_svc_old.h
Normal file
@ -0,0 +1,69 @@
|
||||
|
||||
#ifndef MLPP_DUAL_SVC_OLD_H
|
||||
#define MLPP_DUAL_SVC_OLD_H
|
||||
|
||||
//
|
||||
// DualSVC.hpp
|
||||
//
|
||||
// Created by Marc Melikyan on 10/2/20.
|
||||
//
|
||||
// http://disp.ee.ntu.edu.tw/~pujols/Support%20Vector%20Machine.pdf
|
||||
// http://ciml.info/dl/v0_99/ciml-v0_99-ch11.pdf
|
||||
// These were excellent for the practical intuition behind the dual formulation.
|
||||
|
||||
#include "core/math/math_defs.h"
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
class MLPPDualSVCOld {
|
||||
public:
|
||||
MLPPDualSVCOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, real_t C, std::string kernel = "Linear");
|
||||
MLPPDualSVCOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, real_t C, std::string kernel, real_t p, real_t c);
|
||||
|
||||
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
|
||||
real_t modelTest(std::vector<real_t> x);
|
||||
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
|
||||
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
|
||||
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
|
||||
real_t score();
|
||||
void save(std::string fileName);
|
||||
|
||||
private:
|
||||
void init();
|
||||
|
||||
real_t Cost(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y);
|
||||
|
||||
std::vector<real_t> Evaluate(std::vector<std::vector<real_t>> X);
|
||||
std::vector<real_t> propagate(std::vector<std::vector<real_t>> X);
|
||||
real_t Evaluate(std::vector<real_t> x);
|
||||
real_t propagate(std::vector<real_t> x);
|
||||
void forwardPass();
|
||||
|
||||
void alphaProjection();
|
||||
|
||||
real_t kernelFunction(std::vector<real_t> v, std::vector<real_t> u, std::string kernel);
|
||||
std::vector<std::vector<real_t>> kernelFunction(std::vector<std::vector<real_t>> U, std::vector<std::vector<real_t>> V, std::string kernel);
|
||||
|
||||
std::vector<std::vector<real_t>> inputSet;
|
||||
std::vector<real_t> outputSet;
|
||||
std::vector<real_t> z;
|
||||
std::vector<real_t> y_hat;
|
||||
real_t bias;
|
||||
|
||||
std::vector<real_t> alpha;
|
||||
std::vector<std::vector<real_t>> K;
|
||||
|
||||
real_t C;
|
||||
int n;
|
||||
int k;
|
||||
|
||||
std::string kernel;
|
||||
real_t p; // Poly
|
||||
real_t c; // Poly
|
||||
|
||||
// UI Portion
|
||||
void UI(int epoch, real_t cost_prev);
|
||||
};
|
||||
|
||||
#endif /* DualSVC_hpp */
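As with the other classes, a minimal usage sketch of the interface above; the ±1 labels, the C value, and the demo function name are assumptions made for illustration, and the default Linear kernel is used since the non-linear kernels are still marked as TODO in the implementation:

#include "mlpp/dual_svc/dual_svc_old.h"

#include <iostream>
#include <vector>

void dual_svc_old_demo() {
	// Small linearly separable set; labels are taken as -1/+1 as in the usual dual SVM setup.
	std::vector<std::vector<real_t>> X = { { 1.0, 1.0 }, { 1.5, 2.0 }, { 5.0, 5.0 }, { 6.0, 5.5 } };
	std::vector<real_t> y = { -1, -1, 1, 1 };

	MLPPDualSVCOld svc(X, y, 1.0); // C = 1.0, kernel defaults to "Linear".
	svc.gradientDescent(0.0001, 10000, false);

	std::cout << "Accuracy: " << svc.score() << std::endl;
	std::cout << "Predicted class: " << svc.modelTest({ 5.5, 5.0 }) << std::endl;
}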
|
247
mlpp/exp_reg/exp_reg_old.cpp
Normal file
@ -0,0 +1,247 @@
|
||||
//
|
||||
// ExpReg.cpp
|
||||
//
|
||||
// Created by Marc Melikyan on 10/2/20.
|
||||
//
|
||||
|
||||
#include "exp_reg_old.h"
|
||||
|
||||
#include "../cost/cost.h"
|
||||
#include "../lin_alg/lin_alg.h"
|
||||
#include "../regularization/reg.h"
|
||||
#include "../stat/stat.h"
|
||||
#include "../utilities/utilities.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
MLPPExpRegOld::MLPPExpRegOld(std::vector<std::vector<real_t>> p_inputSet, std::vector<real_t> p_outputSet, std::string p_reg, real_t p_lambda, real_t p_alpha) {
|
||||
inputSet = p_inputSet;
|
||||
outputSet = p_outputSet;
|
||||
n = p_inputSet.size();
|
||||
k = p_inputSet[0].size();
|
||||
reg = p_reg;
|
||||
lambda = p_lambda;
|
||||
alpha = p_alpha;
|
||||
|
||||
y_hat.resize(n);
|
||||
weights = MLPPUtilities::weightInitialization(k);
|
||||
initial = MLPPUtilities::weightInitialization(k);
|
||||
bias = MLPPUtilities::biasInitialization();
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPExpRegOld::modelSetTest(std::vector<std::vector<real_t>> X) {
|
||||
return Evaluate(X);
|
||||
}
|
||||
|
||||
real_t MLPPExpRegOld::modelTest(std::vector<real_t> x) {
|
||||
return Evaluate(x);
|
||||
}
|
||||
|
||||
void MLPPExpRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
forwardPass();
|
||||
|
||||
while (true) {
|
||||
cost_prev = Cost(y_hat, outputSet);
|
||||
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
|
||||
|
||||
for (int i = 0; i < k; i++) {
|
||||
// Calculating the weight gradient
|
||||
real_t sum = 0;
|
||||
for (int j = 0; j < n; j++) {
|
||||
sum += error[j] * inputSet[j][i] * std::pow(weights[i], inputSet[j][i] - 1);
|
||||
}
|
||||
real_t w_gradient = sum / n;
|
||||
|
||||
// Calculating the initial gradient
|
||||
real_t sum2 = 0;
|
||||
for (int j = 0; j < n; j++) {
|
||||
sum2 += error[j] * std::pow(weights[i], inputSet[j][i]);
|
||||
}
|
||||
|
||||
real_t i_gradient = sum2 / n;
|
||||
|
||||
// Weight/initial updation
|
||||
weights[i] -= learning_rate * w_gradient;
|
||||
initial[i] -= learning_rate * i_gradient;
|
||||
}
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Calculating the bias gradient
|
||||
real_t sum = 0;
|
||||
for (int j = 0; j < n; j++) {
|
||||
sum += (y_hat[j] - outputSet[j]);
|
||||
}
|
||||
real_t b_gradient = sum / n;
|
||||
|
||||
// bias updation
|
||||
bias -= learning_rate * b_gradient;
|
||||
forwardPass();
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
epoch++;
|
||||
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPExpRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
|
||||
while (true) {
|
||||
std::random_device rd;
|
||||
std::default_random_engine generator(rd());
|
||||
std::uniform_int_distribution<int> distribution(0, int(n - 1));
|
||||
int outputIndex = distribution(generator);
|
||||
|
||||
real_t y_hat = Evaluate(inputSet[outputIndex]);
|
||||
cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });
|
||||
|
||||
for (int i = 0; i < k; i++) {
|
||||
// Calculating the weight gradients
|
||||
|
||||
real_t w_gradient = (y_hat - outputSet[outputIndex]) * inputSet[outputIndex][i] * std::pow(weights[i], inputSet[outputIndex][i] - 1);
|
||||
real_t i_gradient = (y_hat - outputSet[outputIndex]) * std::pow(weights[i], inputSet[outputIndex][i]);
|
||||
|
||||
// Weight/initial updation
|
||||
weights[i] -= learning_rate * w_gradient;
|
||||
initial[i] -= learning_rate * i_gradient;
|
||||
}
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Calculating the bias gradients
|
||||
real_t b_gradient = (y_hat - outputSet[outputIndex]);
|
||||
|
||||
// Bias updation
|
||||
bias -= learning_rate * b_gradient;
|
||||
y_hat = Evaluate({ inputSet[outputIndex] });
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
epoch++;
|
||||
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPExpRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
while (true) {
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
|
||||
|
||||
for (int j = 0; j < k; j++) {
|
||||
// Calculating the weight gradient
|
||||
real_t sum = 0;
|
||||
for (uint32_t k = 0; k < outputMiniBatches[i].size(); k++) {
|
||||
sum += error[k] * inputMiniBatches[i][k][j] * std::pow(weights[j], inputMiniBatches[i][k][j] - 1);
|
||||
}
|
||||
real_t w_gradient = sum / outputMiniBatches[i].size();
|
||||
|
||||
// Calculating the initial gradient
|
||||
real_t sum2 = 0;
|
||||
for (uint32_t k = 0; k < outputMiniBatches[i].size(); k++) {
|
||||
sum2 += error[k] * std::pow(weights[j], inputMiniBatches[i][k][j]);
|
||||
}
|
||||
|
||||
real_t i_gradient = sum2 / outputMiniBatches[i].size();
|
||||
|
||||
// Weight/initial updation
|
||||
weights[j] -= learning_rate * w_gradient;
|
||||
initial[j] -= learning_rate * i_gradient;
|
||||
}
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Calculating the bias gradient
|
||||
real_t sum = 0;
|
||||
for (uint32_t j = 0; j < outputMiniBatches[i].size(); j++) {
|
||||
sum += (y_hat[j] - outputMiniBatches[i][j]);
|
||||
}
|
||||
|
||||
//real_t b_gradient = sum / outputMiniBatches[i].size();
|
||||
y_hat = Evaluate(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
real_t MLPPExpRegOld::score() {
|
||||
MLPPUtilities util;
|
||||
return util.performance(y_hat, outputSet);
|
||||
}
|
||||
|
||||
void MLPPExpRegOld::save(std::string fileName) {
|
||||
MLPPUtilities util;
|
||||
util.saveParameters(fileName, weights, initial, bias);
|
||||
}
|
||||
|
||||
real_t MLPPExpRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
|
||||
MLPPReg regularization;
|
||||
class MLPPCost cost;
|
||||
return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPExpRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
|
||||
std::vector<real_t> y_hat;
|
||||
y_hat.resize(X.size());
|
||||
for (uint32_t i = 0; i < X.size(); i++) {
|
||||
y_hat[i] = 0;
|
||||
for (uint32_t j = 0; j < X[i].size(); j++) {
|
||||
y_hat[i] += initial[j] * std::pow(weights[j], X[i][j]);
|
||||
}
|
||||
y_hat[i] += bias;
|
||||
}
|
||||
return y_hat;
|
||||
}
|
||||
|
||||
real_t MLPPExpRegOld::Evaluate(std::vector<real_t> x) {
|
||||
real_t y_hat = 0;
|
||||
for (uint32_t i = 0; i < x.size(); i++) {
|
||||
y_hat += initial[i] * std::pow(weights[i], x[i]);
|
||||
}
|
||||
|
||||
return y_hat + bias;
|
||||
}
|
||||
|
||||
// a * w^x + b
|
||||
void MLPPExpRegOld::forwardPass() {
|
||||
y_hat = Evaluate(inputSet);
|
||||
}
|
50
mlpp/exp_reg/exp_reg_old.h
Normal file
@ -0,0 +1,50 @@
|
||||
|
||||
#ifndef MLPP_EXP_REG_OLD_H
|
||||
#define MLPP_EXP_REG_OLD_H
|
||||
|
||||
//
|
||||
// ExpReg.hpp
|
||||
//
|
||||
// Created by Marc Melikyan on 10/2/20.
|
||||
//
|
||||
|
||||
#include "core/math/math_defs.h"
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
class MLPPExpRegOld {
|
||||
public:
|
||||
MLPPExpRegOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
|
||||
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
|
||||
real_t modelTest(std::vector<real_t> x);
|
||||
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
|
||||
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
|
||||
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
|
||||
real_t score();
|
||||
void save(std::string fileName);
|
||||
|
||||
private:
|
||||
real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);
|
||||
|
||||
std::vector<real_t> Evaluate(std::vector<std::vector<real_t>> X);
|
||||
real_t Evaluate(std::vector<real_t> x);
|
||||
void forwardPass();
|
||||
|
||||
std::vector<std::vector<real_t>> inputSet;
|
||||
std::vector<real_t> outputSet;
|
||||
std::vector<real_t> y_hat;
|
||||
std::vector<real_t> weights;
|
||||
std::vector<real_t> initial;
|
||||
real_t bias;
|
||||
|
||||
int n;
|
||||
int k;
|
||||
|
||||
// Regularization Params
|
||||
std::string reg;
|
||||
real_t lambda;
|
||||
real_t alpha; /* This is the controlling param for Elastic Net*/
|
||||
};
|
||||
|
||||
#endif /* ExpReg_hpp */
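A minimal usage sketch for the exponential-regression class above, fitting the a * w^x + b form noted in the implementation; the single-feature dataset, hyperparameters, and file name are illustrative assumptions:

#include "mlpp/exp_reg/exp_reg_old.h"

#include <iostream>
#include <vector>

void exp_reg_old_demo() {
	// One-feature toy set roughly following y = 2^x (illustrative values only).
	std::vector<std::vector<real_t>> X = { { 1 }, { 2 }, { 3 }, { 4 } };
	std::vector<real_t> y = { 2, 4, 8, 16 };

	MLPPExpRegOld model(X, y); // reg = "None", lambda = alpha = 0.5 by default.
	model.gradientDescent(0.001, 10000, false);

	std::cout << "Accuracy: " << model.score() << std::endl;
	model.save("exp_reg_old.csv"); // Hypothetical output path.
}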
|
@ -1,6 +1,6 @@

#ifndef MLPP_GAN_hpp
#define MLPP_GAN_hpp
#ifndef MLPP_GAN_H
#define MLPP_GAN_H

//
// GAN.hpp

287
mlpp/gan/gan_old.cpp
Normal file
@ -0,0 +1,287 @@
|
||||
//
|
||||
// GAN.cpp
|
||||
//
|
||||
// Created by Marc Melikyan on 11/4/20.
|
||||
//
|
||||
|
||||
#include "gan_old.h"
|
||||
#include "../activation/activation.h"
|
||||
#include "../cost/cost.h"
|
||||
#include "../lin_alg/lin_alg.h"
|
||||
#include "../regularization/reg.h"
|
||||
#include "../utilities/utilities.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <iostream>
|
||||
|
||||
MLPPGAN::MLPPGAN(real_t k, std::vector<std::vector<real_t>> outputSet) :
|
||||
outputSet(outputSet), n(outputSet.size()), k(k) {
|
||||
}
|
||||
|
||||
MLPPGAN::~MLPPGAN() {
|
||||
delete outputLayer;
|
||||
}
|
||||
|
||||
std::vector<std::vector<real_t>> MLPPGAN::generateExample(int n) {
|
||||
MLPPLinAlg alg;
|
||||
return modelSetTestGenerator(alg.gaussianNoise(n, k));
|
||||
}
|
||||
|
||||
void MLPPGAN::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
|
||||
class MLPPCost cost;
|
||||
MLPPLinAlg alg;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
forwardPass();
|
||||
|
||||
while (true) {
|
||||
cost_prev = Cost(y_hat, alg.onevec(n));
|
||||
|
||||
// Training of the discriminator.
|
||||
|
||||
std::vector<std::vector<real_t>> generatorInputSet = alg.gaussianNoise(n, k);
|
||||
std::vector<std::vector<real_t>> discriminatorInputSet = modelSetTestGenerator(generatorInputSet);
|
||||
discriminatorInputSet.insert(discriminatorInputSet.end(), outputSet.begin(), outputSet.end()); // Fake + real inputs.
|
||||
|
||||
std::vector<real_t> y_hat = modelSetTestDiscriminator(discriminatorInputSet);
|
||||
std::vector<real_t> outputSet = alg.zerovec(n);
|
||||
std::vector<real_t> outputSetReal = alg.onevec(n);
|
||||
outputSet.insert(outputSet.end(), outputSetReal.begin(), outputSetReal.end()); // Fake + real output scores.
|
||||
|
||||
auto dgrads = computeDiscriminatorGradients(y_hat, outputSet);
|
||||
auto cumulativeDiscriminatorHiddenLayerWGrad = std::get<0>(dgrads);
|
||||
auto outputDiscriminatorWGrad = std::get<1>(dgrads);
|
||||
|
||||
cumulativeDiscriminatorHiddenLayerWGrad = alg.scalarMultiply(learning_rate / n, cumulativeDiscriminatorHiddenLayerWGrad);
|
||||
outputDiscriminatorWGrad = alg.scalarMultiply(learning_rate / n, outputDiscriminatorWGrad);
|
||||
updateDiscriminatorParameters(cumulativeDiscriminatorHiddenLayerWGrad, outputDiscriminatorWGrad, learning_rate);
|
||||
|
||||
// Training of the generator.
|
||||
generatorInputSet = alg.gaussianNoise(n, k);
|
||||
discriminatorInputSet = modelSetTestGenerator(generatorInputSet);
|
||||
y_hat = modelSetTestDiscriminator(discriminatorInputSet);
|
||||
outputSet = alg.onevec(n);
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> cumulativeGeneratorHiddenLayerWGrad = computeGeneratorGradients(y_hat, outputSet);
|
||||
cumulativeGeneratorHiddenLayerWGrad = alg.scalarMultiply(learning_rate / n, cumulativeGeneratorHiddenLayerWGrad);
|
||||
updateGeneratorParameters(cumulativeGeneratorHiddenLayerWGrad, learning_rate);
|
||||
|
||||
forwardPass();
|
||||
if (UI) {
|
||||
MLPPGAN::UI(epoch, cost_prev, MLPPGAN::y_hat, alg.onevec(n));
|
||||
}
|
||||
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
real_t MLPPGAN::score() {
|
||||
MLPPLinAlg alg;
|
||||
MLPPUtilities util;
|
||||
forwardPass();
|
||||
return util.performance(y_hat, alg.onevec(n));
|
||||
}
|
||||
|
||||
void MLPPGAN::save(std::string fileName) {
|
||||
MLPPUtilities util;
|
||||
if (!network.empty()) {
|
||||
util.saveParameters(fileName, network[0].weights, network[0].bias, false, 1);
|
||||
for (uint32_t i = 1; i < network.size(); i++) {
|
||||
util.saveParameters(fileName, network[i].weights, network[i].bias, true, i + 1);
|
||||
}
|
||||
util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, true, network.size() + 1);
|
||||
} else {
|
||||
util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, false, network.size() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
|
||||
MLPPLinAlg alg;
|
||||
if (network.empty()) {
|
||||
network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
|
||||
network[0].forwardPass();
|
||||
} else {
|
||||
network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
|
||||
network[network.size() - 1].forwardPass();
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPGAN::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
|
||||
MLPPLinAlg alg;
|
||||
if (!network.empty()) {
|
||||
outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
|
||||
} else {
|
||||
outputLayer = new MLPPOldOutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::vector<real_t>> MLPPGAN::modelSetTestGenerator(std::vector<std::vector<real_t>> X) {
|
||||
if (!network.empty()) {
|
||||
network[0].input = X;
|
||||
network[0].forwardPass();
|
||||
|
||||
for (uint32_t i = 1; i <= network.size() / 2; i++) {
|
||||
network[i].input = network[i - 1].a;
|
||||
network[i].forwardPass();
|
||||
}
|
||||
}
|
||||
return network[network.size() / 2].a;
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPGAN::modelSetTestDiscriminator(std::vector<std::vector<real_t>> X) {
|
||||
if (!network.empty()) {
|
||||
for (uint32_t i = network.size() / 2 + 1; i < network.size(); i++) {
|
||||
if (i == network.size() / 2 + 1) {
|
||||
network[i].input = X;
|
||||
} else {
|
||||
network[i].input = network[i - 1].a;
|
||||
}
|
||||
network[i].forwardPass();
|
||||
}
|
||||
outputLayer->input = network[network.size() - 1].a;
|
||||
}
|
||||
outputLayer->forwardPass();
|
||||
return outputLayer->a;
|
||||
}
|
||||
|
||||
real_t MLPPGAN::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
|
||||
MLPPReg regularization;
|
||||
class MLPPCost cost;
|
||||
real_t totalRegTerm = 0;
|
||||
|
||||
auto cost_function = outputLayer->cost_map[outputLayer->cost];
|
||||
if (!network.empty()) {
|
||||
for (uint32_t i = 0; i < network.size() - 1; i++) {
|
||||
totalRegTerm += regularization.regTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg);
|
||||
}
|
||||
}
|
||||
return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
|
||||
}
|
||||
|
||||
void MLPPGAN::forwardPass() {
|
||||
MLPPLinAlg alg;
|
||||
if (!network.empty()) {
|
||||
network[0].input = alg.gaussianNoise(n, k);
|
||||
network[0].forwardPass();
|
||||
|
||||
for (uint32_t i = 1; i < network.size(); i++) {
|
||||
network[i].input = network[i - 1].a;
|
||||
network[i].forwardPass();
|
||||
}
|
||||
outputLayer->input = network[network.size() - 1].a;
|
||||
} else { // Should never happen, though.
|
||||
outputLayer->input = alg.gaussianNoise(n, k);
|
||||
}
|
||||
outputLayer->forwardPass();
|
||||
y_hat = outputLayer->a;
|
||||
}
|
||||
|
||||
void MLPPGAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate) {
|
||||
MLPPLinAlg alg;
|
||||
|
||||
outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
|
||||
outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
|
||||
|
||||
if (!network.empty()) {
|
||||
network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, hiddenLayerUpdations[0]);
|
||||
network[network.size() - 1].bias = alg.subtractMatrixRows(network[network.size() - 1].bias, alg.scalarMultiply(learning_rate / n, network[network.size() - 1].delta));
|
||||
|
||||
for (int i = static_cast<int>(network.size()) - 2; i > static_cast<int>(network.size()) / 2; i--) {
|
||||
network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
|
||||
network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate / n, network[i].delta));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate) {
|
||||
MLPPLinAlg alg;
|
||||
|
||||
if (!network.empty()) {
|
||||
for (int i = network.size() / 2; i >= 0; i--) {
|
||||
//std::cout << network[i].weights.size() << "x" << network[i].weights[0].size() << std::endl;
|
||||
//std::cout << hiddenLayerUpdations[(network.size() - 2) - i + 1].size() << "x" << hiddenLayerUpdations[(network.size() - 2) - i + 1][0].size() << std::endl;
|
||||
network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
|
||||
network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate / n, network[i].delta));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPGAN::computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
|
||||
class MLPPCost cost;
|
||||
MLPPActivation avn;
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
|
||||
|
||||
auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
|
||||
auto outputAvn = outputLayer->activation_map[outputLayer->activation];
|
||||
outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
|
||||
std::vector<real_t> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
|
||||
outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
|
||||
|
||||
if (!network.empty()) {
|
||||
auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
|
||||
|
||||
network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
|
||||
std::vector<std::vector<real_t>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
|
||||
|
||||
cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
|
||||
|
||||
//std::cout << "HIDDENLAYER FIRST:" << hiddenLayerWGrad.size() << "x" << hiddenLayerWGrad[0].size() << std::endl;
|
||||
//std::cout << "WEIGHTS SECOND:" << network[network.size() - 1].weights.size() << "x" << network[network.size() - 1].weights[0].size() << std::endl;
|
||||
|
||||
for (int i = static_cast<int>(network.size()) - 2; i > static_cast<int>(network.size()) / 2; i--) {
|
||||
hiddenLayerAvn = network[i].activation_map[network[i].activation];
|
||||
network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvn)(network[i].z, 1));
|
||||
hiddenLayerWGrad = alg.matmult(alg.transpose(network[i].input), network[i].delta);
|
||||
|
||||
cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
|
||||
}
|
||||
}
|
||||
return { cumulativeHiddenLayerWGrad, outputWGrad };
|
||||
}
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> MLPPGAN::computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
|
||||
class MLPPCost cost;
|
||||
MLPPActivation avn;
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
|
||||
std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
|
||||
|
||||
auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
|
||||
auto outputAvn = outputLayer->activation_map[outputLayer->activation];
|
||||
outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
|
||||
std::vector<real_t> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
|
||||
outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
|
||||
if (!network.empty()) {
|
||||
auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
|
||||
network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
|
||||
std::vector<std::vector<real_t>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
|
||||
cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
|
||||
|
||||
for (int i = network.size() - 2; i >= 0; i--) {
|
||||
hiddenLayerAvn = network[i].activation_map[network[i].activation];
|
||||
network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvn)(network[i].z, 1));
|
||||
hiddenLayerWGrad = alg.matmult(alg.transpose(network[i].input), network[i].delta);
|
||||
cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
|
||||
}
|
||||
}
|
||||
return cumulativeHiddenLayerWGrad;
|
||||
}
|
||||
|
||||
void MLPPGAN::UI(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
|
||||
std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
|
||||
MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);
|
||||
if (!network.empty()) {
|
||||
for (int i = network.size() - 1; i >= 0; i--) {
|
||||
std::cout << "Layer " << i + 1 << ": " << std::endl;
|
||||
MLPPUtilities::UI(network[i].weights, network[i].bias);
|
||||
}
|
||||
}
|
||||
}
|
59
mlpp/gan/gan_old.h
Normal file
@ -0,0 +1,59 @@
|
||||
|
||||
#ifndef MLPP_GAN_OLD_hpp
|
||||
#define MLPP_GAN_OLD_hpp
|
||||
|
||||
//
|
||||
// GAN.hpp
|
||||
//
|
||||
// Created by Marc Melikyan on 11/4/20.
|
||||
//
|
||||
|
||||
#include "core/math/math_defs.h"
|
||||
|
||||
#include "../hidden_layer/hidden_layer.h"
|
||||
#include "../output_layer/output_layer.h"
|
||||
|
||||
#include "../hidden_layer/hidden_layer_old.h"
|
||||
#include "../output_layer/output_layer_old.h"
|
||||
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
|
||||
class MLPPGAN {
|
||||
public:
|
||||
MLPPGAN(real_t k, std::vector<std::vector<real_t>> outputSet);
|
||||
~MLPPGAN();
|
||||
std::vector<std::vector<real_t>> generateExample(int n);
|
||||
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
|
||||
real_t score();
|
||||
void save(std::string fileName);
|
||||
|
||||
void addLayer(int n_hidden, std::string activation, std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
|
||||
void addOutputLayer(std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
|
||||
|
||||
private:
|
||||
std::vector<std::vector<real_t>> modelSetTestGenerator(std::vector<std::vector<real_t>> X); // Evaluator for the generator of the gan.
|
||||
std::vector<real_t> modelSetTestDiscriminator(std::vector<std::vector<real_t>> X); // Evaluator for the discriminator of the gan.
|
||||
|
||||
real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);
|
||||
|
||||
void forwardPass();
|
||||
void updateDiscriminatorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate);
|
||||
void updateGeneratorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate);
|
||||
std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet);
|
||||
std::vector<std::vector<std::vector<real_t>>> computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet);
|
||||
|
||||
void UI(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> outputSet);
|
||||
|
||||
std::vector<std::vector<real_t>> outputSet;
|
||||
std::vector<real_t> y_hat;
|
||||
|
||||
std::vector<MLPPOldHiddenLayer> network;
|
||||
MLPPOldOutputLayer *outputLayer;
|
||||
|
||||
int n;
|
||||
int k;
|
||||
};
|
||||
|
||||
#endif /* GAN_hpp */
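A minimal usage sketch of the GAN class above. The layer widths, the latent size k = 2, and the iteration counts are illustrative assumptions; the one structural requirement visible from the code is that the middle hidden layer's width match the data dimensionality, since modelSetTestGenerator returns the activations of layer network.size() / 2 as the generated samples:

#include "mlpp/gan/gan_old.h"

#include <iostream>
#include <vector>

void gan_old_demo() {
	// Toy "real" samples with two features each.
	std::vector<std::vector<real_t>> real_data = { { 0.9, 1.1 }, { 1.0, 1.0 }, { 1.1, 0.9 } };

	MLPPGAN gan(2, real_data); // k = 2: dimension of the Gaussian noise fed to the generator.
	gan.addLayer(4, "Sigmoid"); // Generator hidden layer.
	gan.addLayer(2, "Sigmoid"); // Generator output layer; width matches the data dimension.
	gan.addLayer(4, "Sigmoid"); // Discriminator hidden layer.
	gan.addOutputLayer();       // Sigmoid/LogLoss discriminator head, as wired in addOutputLayer().

	gan.gradientDescent(0.1, 1000, false);

	std::vector<std::vector<real_t>> fakes = gan.generateExample(3);
	std::cout << "Generated " << fakes.size() << " samples; discriminator score: " << gan.score() << std::endl;
}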
|
89
mlpp/gaussian_nb/gaussian_nb_old.cpp
Normal file
@ -0,0 +1,89 @@
|
||||
//
|
||||
// GaussianNB.cpp
|
||||
//
|
||||
// Created by Marc Melikyan on 1/17/21.
|
||||
//
|
||||
|
||||
#include "gaussian_nb_old.h"
|
||||
|
||||
#include "../lin_alg/lin_alg.h"
|
||||
#include "../stat/stat.h"
|
||||
#include "../utilities/utilities.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
MLPPGaussianNBOld::MLPPGaussianNBOld(std::vector<std::vector<real_t>> p_inputSet, std::vector<real_t> p_outputSet, int p_class_num) {
|
||||
inputSet = p_inputSet;
|
||||
outputSet = p_outputSet;
|
||||
class_num = p_class_num;
|
||||
|
||||
y_hat.resize(outputSet.size());
|
||||
Evaluate();
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPGaussianNBOld::modelSetTest(std::vector<std::vector<real_t>> X) {
|
||||
std::vector<real_t> y_hat;
|
||||
for (uint32_t i = 0; i < X.size(); i++) {
|
||||
y_hat.push_back(modelTest(X[i]));
|
||||
}
|
||||
return y_hat;
|
||||
}
|
||||
|
||||
real_t MLPPGaussianNBOld::modelTest(std::vector<real_t> x) {
|
||||
real_t score[class_num];
|
||||
real_t y_hat_i = 1;
|
||||
for (int i = class_num - 1; i >= 0; i--) {
|
||||
y_hat_i += std::log(priors[i] * (1 / sqrt(2 * M_PI * sigma[i] * sigma[i])) * exp(-(x[i] * mu[i]) * (x[i] * mu[i]) / (2 * sigma[i] * sigma[i])));
|
||||
score[i] = exp(y_hat_i);
|
||||
}
|
||||
return std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(real_t)));
|
||||
}
|
||||
|
||||
real_t MLPPGaussianNBOld::score() {
|
||||
MLPPUtilities util;
|
||||
return util.performance(y_hat, outputSet);
|
||||
}
|
||||
|
||||
void MLPPGaussianNBOld::Evaluate() {
|
||||
MLPPStat stat;
|
||||
MLPPLinAlg alg;
|
||||
|
||||
// Computing mu_k_y and sigma_k_y
|
||||
mu.resize(class_num);
|
||||
sigma.resize(class_num);
|
||||
for (int i = class_num - 1; i >= 0; i--) {
|
||||
std::vector<real_t> set;
|
||||
for (uint32_t j = 0; j < inputSet.size(); j++) {
|
||||
for (uint32_t k = 0; k < inputSet[j].size(); k++) {
|
||||
if (outputSet[j] == i) {
|
||||
set.push_back(inputSet[j][k]);
|
||||
}
|
||||
}
|
||||
}
|
||||
mu[i] = stat.mean(set);
|
||||
sigma[i] = stat.standardDeviation(set);
|
||||
}
|
||||
|
||||
// Priors
|
||||
priors.resize(class_num);
|
||||
for (uint32_t i = 0; i < outputSet.size(); i++) {
|
||||
priors[int(outputSet[i])]++;
|
||||
}
|
||||
priors = alg.scalarMultiply(real_t(1) / real_t(outputSet.size()), priors);
|
||||
|
||||
for (uint32_t i = 0; i < outputSet.size(); i++) {
|
||||
real_t score[class_num];
|
||||
real_t y_hat_i = 1;
|
||||
for (int j = class_num - 1; j >= 0; j--) {
|
||||
for (uint32_t k = 0; k < inputSet[i].size(); k++) {
|
||||
y_hat_i += std::log(priors[j] * (1 / sqrt(2 * M_PI * sigma[j] * sigma[j])) * exp(-(inputSet[i][k] * mu[j]) * (inputSet[i][k] * mu[j]) / (2 * sigma[j] * sigma[j])));
|
||||
}
|
||||
score[j] = exp(y_hat_i);
|
||||
std::cout << score[j] << std::endl;
|
||||
}
|
||||
y_hat[i] = std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(real_t)));
|
||||
std::cout << std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(real_t))) << std::endl;
|
||||
}
|
||||
}
|
37
mlpp/gaussian_nb/gaussian_nb_old.h
Normal file
@ -0,0 +1,37 @@
|
||||
|
||||
#ifndef MLPP_GAUSSIAN_NB_OLD_H
|
||||
#define MLPP_GAUSSIAN_NB_OLD_H
|
||||
|
||||
//
|
||||
// GaussianNB.hpp
|
||||
//
|
||||
// Created by Marc Melikyan on 1/17/21.
|
||||
//
|
||||
|
||||
#include "core/math/math_defs.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
class MLPPGaussianNBOld {
|
||||
public:
|
||||
MLPPGaussianNBOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, int class_num);
|
||||
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
|
||||
real_t modelTest(std::vector<real_t> x);
|
||||
real_t score();
|
||||
|
||||
private:
|
||||
void Evaluate();
|
||||
|
||||
int class_num;
|
||||
|
||||
std::vector<real_t> priors;
|
||||
std::vector<real_t> mu;
|
||||
std::vector<real_t> sigma;
|
||||
|
||||
std::vector<std::vector<real_t>> inputSet;
|
||||
std::vector<real_t> outputSet;
|
||||
|
||||
std::vector<real_t> y_hat;
|
||||
};
|
||||
|
||||
#endif /* GaussianNB_hpp */
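A minimal usage sketch of the Gaussian naive Bayes class above; the toy dataset and the demo function name are illustrative assumptions. Note that fitting happens inside the constructor (via the private Evaluate()), so there is no separate training call:

#include "mlpp/gaussian_nb/gaussian_nb_old.h"

#include <iostream>
#include <vector>

void gaussian_nb_old_demo() {
	// Toy two-class set; class labels are the integers 0 and 1.
	std::vector<std::vector<real_t>> X = { { 1.0, 2.0 }, { 1.2, 1.8 }, { 6.0, 7.0 }, { 6.5, 6.8 } };
	std::vector<real_t> y = { 0, 0, 1, 1 };

	MLPPGaussianNBOld nb(X, y, 2); // class_num = 2; priors, means, and sigmas are computed here.

	std::cout << "Training accuracy: " << nb.score() << std::endl;
	std::cout << "Predicted class: " << nb.modelTest({ 6.2, 6.9 }) << std::endl;
}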
|
598
mlpp/lin_reg/lin_reg_old.cpp
Normal file
@ -0,0 +1,598 @@
|
||||
//
|
||||
// LinReg.cpp
|
||||
//
|
||||
// Created by Marc Melikyan on 10/2/20.
|
||||
//
|
||||
|
||||
#include "lin_reg_old.h"
|
||||
|
||||
#include "../cost/cost.h"
|
||||
#include "../lin_alg/lin_alg.h"
|
||||
#include "../regularization/reg.h"
|
||||
#include "../stat/stat.h"
|
||||
#include "../utilities/utilities.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
MLPPLinRegOld::MLPPLinRegOld(std::vector<std::vector<real_t>> p_inputSet, std::vector<real_t> p_outputSet, std::string p_reg, real_t p_lambda, real_t p_alpha) {
|
||||
inputSet = p_inputSet;
|
||||
outputSet = p_outputSet;
|
||||
n = p_inputSet.size();
|
||||
k = p_inputSet[0].size();
|
||||
reg = p_reg;
|
||||
lambda = p_lambda;
|
||||
alpha = p_alpha;
|
||||
|
||||
y_hat.resize(n);
|
||||
|
||||
weights = MLPPUtilities::weightInitialization(k);
|
||||
bias = MLPPUtilities::biasInitialization();
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPLinRegOld::modelSetTest(std::vector<std::vector<real_t>> X) {
|
||||
return Evaluate(X);
|
||||
}
|
||||
|
||||
real_t MLPPLinRegOld::modelTest(std::vector<real_t> x) {
|
||||
return Evaluate(x);
|
||||
}
|
||||
|
||||
void MLPPLinRegOld::NewtonRaphson(real_t learning_rate, int max_epoch, bool UI) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
forwardPass();
|
||||
while (true) {
|
||||
cost_prev = Cost(y_hat, outputSet);
|
||||
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
|
||||
|
||||
// Calculating the weight gradients (2nd derivative)
|
||||
std::vector<real_t> first_derivative = alg.mat_vec_mult(alg.transpose(inputSet), error);
|
||||
std::vector<std::vector<real_t>> second_derivative = alg.matmult(alg.transpose(inputSet), inputSet);
|
||||
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(alg.inverse(second_derivative)), first_derivative)));
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Calculating the bias gradients (2nd derivative)
|
||||
bias -= learning_rate * alg.sum_elements(error) / n; // We keep this the same. The 2nd derivative is just [1].
|
||||
forwardPass();
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPLinRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
forwardPass();
|
||||
|
||||
while (true) {
|
||||
cost_prev = Cost(y_hat, outputSet);
|
||||
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
|
||||
|
||||
// Calculating the weight gradients
|
||||
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputSet), error)));
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Calculating the bias gradients
|
||||
bias -= learning_rate * alg.sum_elements(error) / n;
|
||||
forwardPass();
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MLPPLinRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
|
||||
while (true) {
|
||||
std::random_device rd;
|
||||
std::default_random_engine generator(rd());
|
||||
std::uniform_int_distribution<int> distribution(0, int(n - 1));
|
||||
int outputIndex = distribution(generator);
|
||||
|
||||
real_t y_hat = Evaluate(inputSet[outputIndex]);
|
||||
cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });
|
||||
|
||||
real_t error = y_hat - outputSet[outputIndex];
|
||||
|
||||
// Weight updation
|
||||
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate * error, inputSet[outputIndex]));
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Bias updation
|
||||
bias -= learning_rate * error;
|
||||
|
||||
y_hat = Evaluate({ inputSet[outputIndex] });
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
epoch++;
|
||||
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPLinRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
while (true) {
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
|
||||
|
||||
// Calculating the weight gradients
|
||||
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error)));
|
||||
weights = regularization.regWeights(weights, lambda, alpha, reg);
|
||||
|
||||
// Calculating the bias gradients
|
||||
bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size();
|
||||
y_hat = Evaluate(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPLinRegOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
// Initializing necessary components for Momentum.
|
||||
std::vector<real_t> v = alg.zerovec(weights.size());
|
||||
while (true) {
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
|
||||
|
||||
// Calculating the weight gradients
|
||||
std::vector<real_t> gradient = alg.scalarMultiply(1 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
|
||||
std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
|
||||
std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final
|
||||
|
||||
v = alg.addition(alg.scalarMultiply(gamma, v), alg.scalarMultiply(learning_rate, weight_grad));
|
||||
|
||||
weights = alg.subtraction(weights, v);
|
||||
|
||||
// Calculating the bias gradients
|
||||
bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal
|
||||
y_hat = Evaluate(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPLinRegOld::NAG(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
// Initializing necessary components for Momentum.
|
||||
std::vector<real_t> v = alg.zerovec(weights.size());
|
||||
while (true) {
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
weights = alg.subtraction(weights, alg.scalarMultiply(gamma, v)); // "Aposterori" calculation
|
||||
|
||||
std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
|
||||
|
||||
// Calculating the weight gradients
|
||||
std::vector<real_t> gradient = alg.scalarMultiply(1 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
|
||||
std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
|
||||
std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final
|
||||
|
||||
v = alg.addition(alg.scalarMultiply(gamma, v), alg.scalarMultiply(learning_rate, weight_grad));
|
||||
|
||||
weights = alg.subtraction(weights, v);
|
||||
|
||||
// Calculating the bias gradients
|
||||
bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal
|
||||
y_hat = Evaluate(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPLinRegOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPReg regularization;
|
||||
real_t cost_prev = 0;
|
||||
int epoch = 1;
|
||||
|
||||
// Creating the mini-batches
|
||||
int n_mini_batch = n / mini_batch_size;
|
||||
auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
|
||||
auto inputMiniBatches = std::get<0>(batches);
|
||||
auto outputMiniBatches = std::get<1>(batches);
|
||||
|
||||
// Initializing necessary components for Adagrad.
|
||||
std::vector<real_t> v = alg.zerovec(weights.size());
|
||||
while (true) {
|
||||
for (int i = 0; i < n_mini_batch; i++) {
|
||||
std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
|
||||
cost_prev = Cost(y_hat, outputMiniBatches[i]);
|
||||
|
||||
std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
|
||||
|
||||
// Calculating the weight gradients
|
||||
std::vector<real_t> gradient = alg.scalarMultiply(1 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
|
||||
std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
|
||||
std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final
|
||||
|
||||
v = alg.hadamard_product(weight_grad, weight_grad);
|
||||
|
||||
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, alg.elementWiseDivision(weight_grad, alg.sqrt(alg.scalarAdd(e, v)))));
|
||||
|
||||
// Calculating the bias gradients
|
||||
bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal
|
||||
y_hat = Evaluate(inputMiniBatches[i]);
|
||||
|
||||
if (UI) {
|
||||
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
|
||||
MLPPUtilities::UI(weights, bias);
|
||||
}
|
||||
}
|
||||
epoch++;
|
||||
if (epoch > max_epoch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
forwardPass();
|
||||
}
|
||||
|
||||
void MLPPLinRegOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI) {
	// Adagrad upgrade. Momentum is applied.
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	// Initializing necessary components for Adagrad.
	std::vector<real_t> v = alg.zerovec(weights.size());
	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			v = alg.addition(alg.scalarMultiply(b1, v), alg.scalarMultiply(1 - b1, alg.hadamard_product(weight_grad, weight_grad)));

			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, alg.elementWiseDivision(weight_grad, alg.sqrt(alg.scalarAdd(e, v)))));

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal
			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}
		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}

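// For reference, the update above takes the RMSProp form of the rule: an exponential moving
// average of squared gradients with a fixed learning rate,
//   v_t = b1 * v_{t-1} + (1 - b1) * g_t * g_t,   w_{t+1} = w_t - learning_rate * g_t / sqrt(v_t + e);
// full Adadelta would additionally track a running average of squared parameter updates in
// place of the fixed learning rate.
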
void MLPPLinRegOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	// Initializing necessary components for Adam.
	std::vector<real_t> m = alg.zerovec(weights.size());

	std::vector<real_t> v = alg.zerovec(weights.size());
	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			m = alg.addition(alg.scalarMultiply(b1, m), alg.scalarMultiply(1 - b1, weight_grad));
			v = alg.addition(alg.scalarMultiply(b2, v), alg.scalarMultiply(1 - b2, alg.exponentiate(weight_grad, 2)));

			std::vector<real_t> m_hat = alg.scalarMultiply(1 / (1 - pow(b1, epoch)), m);
			std::vector<real_t> v_hat = alg.scalarMultiply(1 / (1 - pow(b2, epoch)), v);

			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, alg.elementWiseDivision(m_hat, alg.scalarAdd(e, alg.sqrt(v_hat)))));

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal
			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}
		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}

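// For reference, the standard Adam update that this routine follows (with the epoch counter
// standing in for the time step t):
//   m_t = b1 * m_{t-1} + (1 - b1) * g_t
//   v_t = b2 * v_{t-1} + (1 - b2) * g_t^2
//   m_hat = m_t / (1 - b1^t),   v_hat = v_t / (1 - b2^t)
//   w_{t+1} = w_t - learning_rate * m_hat / (sqrt(v_hat) + e)
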
void MLPPLinRegOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	std::vector<real_t> m = alg.zerovec(weights.size());

	std::vector<real_t> u = alg.zerovec(weights.size());
	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			m = alg.addition(alg.scalarMultiply(b1, m), alg.scalarMultiply(1 - b1, weight_grad));
			u = alg.max(alg.scalarMultiply(b2, u), alg.abs(weight_grad));

			std::vector<real_t> m_hat = alg.scalarMultiply(1 / (1 - pow(b1, epoch)), m);

			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, alg.elementWiseDivision(m_hat, u)));

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal
			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}
		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}

void MLPPLinRegOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	// Initializing necessary components for Adam.
	std::vector<real_t> m = alg.zerovec(weights.size());
	std::vector<real_t> v = alg.zerovec(weights.size());
	std::vector<real_t> m_final = alg.zerovec(weights.size());
	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			m = alg.addition(alg.scalarMultiply(b1, m), alg.scalarMultiply(1 - b1, weight_grad));
			v = alg.addition(alg.scalarMultiply(b2, v), alg.scalarMultiply(1 - b2, alg.exponentiate(weight_grad, 2)));
			m_final = alg.addition(alg.scalarMultiply(b1, m), alg.scalarMultiply((1 - b1) / (1 - pow(b1, epoch)), weight_grad));

			std::vector<real_t> m_hat = alg.scalarMultiply(1 / (1 - pow(b1, epoch)), m);
			std::vector<real_t> v_hat = alg.scalarMultiply(1 / (1 - pow(b2, epoch)), v);

			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, alg.elementWiseDivision(m_final, alg.scalarAdd(e, alg.sqrt(v_hat)))));

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal
			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}
		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}

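// For reference, the usual Nadam step replaces Adam's m_hat with a Nesterov-corrected term:
//   w_{t+1} = w_t - learning_rate * (b1 * m_hat + (1 - b1) * g_t / (1 - b1^t)) / (sqrt(v_hat) + e),
// which the m_final computed above approximates.
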
void MLPPLinRegOld::normalEquation() {
	MLPPLinAlg alg;
	MLPPStat stat;
	std::vector<real_t> x_means;
	std::vector<std::vector<real_t>> inputSetT = alg.transpose(inputSet);

	x_means.resize(inputSetT.size());
	for (uint32_t i = 0; i < inputSetT.size(); i++) {
		x_means[i] = (stat.mean(inputSetT[i]));
	}

	//try {
	std::vector<real_t> temp;
	temp.resize(k);
	temp = alg.mat_vec_mult(alg.inverse(alg.matmult(alg.transpose(inputSet), inputSet)), alg.mat_vec_mult(alg.transpose(inputSet), outputSet));
	if (std::isnan(temp[0])) {
		//throw 99;
		//TODO ERR_FAIL_COND
		std::cout << "ERR: Resulting matrix was noninvertible/degenerate, and so the normal equation could not be performed. Try utilizing gradient descent." << std::endl;
		return;
	} else {
		if (reg == "Ridge") {
			weights = alg.mat_vec_mult(alg.inverse(alg.addition(alg.matmult(alg.transpose(inputSet), inputSet), alg.scalarMultiply(lambda, alg.identity(k)))), alg.mat_vec_mult(alg.transpose(inputSet), outputSet));
		} else {
			weights = alg.mat_vec_mult(alg.inverse(alg.matmult(alg.transpose(inputSet), inputSet)), alg.mat_vec_mult(alg.transpose(inputSet), outputSet));
		}

		bias = stat.mean(outputSet) - alg.dot(weights, x_means);

		forwardPass();
	}
	//} catch (int err_num) {
	//	std::cout << "ERR " << err_num << ": Resulting matrix was noninvertible/degenerate, and so the normal equation could not be performed. Try utilizing gradient descent." << std::endl;
	//}
}

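// For reference, the closed forms applied above are the ordinary least squares and ridge solutions:
//   w = (X^T X)^(-1) X^T y                  (reg == "None")
//   w = (X^T X + lambda * I)^(-1) X^T y     (reg == "Ridge")
// with the intercept recovered afterwards as b = mean(y) - w . mean(x).
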
real_t MLPPLinRegOld::score() {
	MLPPUtilities util;
	return util.performance(y_hat, outputSet);
}

void MLPPLinRegOld::save(std::string fileName) {
	MLPPUtilities util;
	util.saveParameters(fileName, weights, bias);
}

real_t MLPPLinRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
	MLPPReg regularization;
	class MLPPCost cost;
	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
}

std::vector<real_t> MLPPLinRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
	return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}

real_t MLPPLinRegOld::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
	return alg.dot(weights, x) + bias;
}

// wTx + b
void MLPPLinRegOld::forwardPass() {
	y_hat = Evaluate(inputSet);
}
60
mlpp/lin_reg/lin_reg_old.h
Normal file
@ -0,0 +1,60 @@

#ifndef MLPP_LIN_REG_OLD_H
#define MLPP_LIN_REG_OLD_H

//
//  LinReg.hpp
//
//  Created by Marc Melikyan on 10/2/20.
//

#include "core/math/math_defs.h"

#include <string>
#include <vector>

class MLPPLinRegOld {
public:
	MLPPLinRegOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
	real_t modelTest(std::vector<real_t> x);
	void NewtonRaphson(real_t learning_rate, int max_epoch, bool UI);
	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
	void SGD(real_t learning_rate, int max_epoch, bool UI = false);

	void Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI = false);
	void NAG(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI = false);
	void Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI = false);
	void Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI = false);
	void Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
	void Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
	void Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);

	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
	void normalEquation();
	real_t score();
	void save(std::string fileName);

private:
	real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);

	std::vector<real_t> Evaluate(std::vector<std::vector<real_t>> X);
	real_t Evaluate(std::vector<real_t> x);
	void forwardPass();

	std::vector<std::vector<real_t>> inputSet;
	std::vector<real_t> outputSet;
	std::vector<real_t> y_hat;
	std::vector<real_t> weights;
	real_t bias;

	int n;
	int k;

	// Regularization Params
	std::string reg;
	int lambda;
	int alpha; /* This is the controlling param for Elastic Net*/
};

#endif /* LinReg_hpp */
213
mlpp/log_reg/log_reg_old.cpp
Normal file
@ -0,0 +1,213 @@

//
//  LogReg.cpp
//
//  Created by Marc Melikyan on 10/2/20.
//

#include "log_reg_old.h"

#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

#include <iostream>
#include <random>

MLPPLogRegOld::MLPPLogRegOld(std::vector<std::vector<real_t>> pinputSet, std::vector<real_t> poutputSet, std::string preg, real_t plambda, real_t palpha) {
	inputSet = pinputSet;
	outputSet = poutputSet;
	n = pinputSet.size();
	k = pinputSet[0].size();
	reg = preg;
	lambda = plambda;
	alpha = palpha;

	y_hat.resize(n);
	weights = MLPPUtilities::weightInitialization(k);
	bias = MLPPUtilities::biasInitialization();
}

std::vector<real_t> MLPPLogRegOld::modelSetTest(std::vector<std::vector<real_t>> X) {
	return Evaluate(X);
}

real_t MLPPLogRegOld::modelTest(std::vector<real_t> x) {
	return Evaluate(x);
}

void MLPPLogRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;
	forwardPass();

	while (true) {
		cost_prev = Cost(y_hat, outputSet);

		std::vector<real_t> error = alg.subtraction(y_hat, outputSet);

		// Calculating the weight gradients
		weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputSet), error)));
		weights = regularization.regWeights(weights, lambda, alpha, reg);

		// Calculating the bias gradients
		bias -= learning_rate * alg.sum_elements(error) / n;
		forwardPass();

		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
			MLPPUtilities::UI(weights, bias);
		}
		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
}

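// For reference, the batch update above applies the log-loss gradient for a sigmoid model:
//   grad_w = (1 / n) * X^T (y_hat - y),   grad_b = (1 / n) * sum_i (y_hat_i - y_i),
// each scaled by learning_rate; MLE below ascends the equivalent log-likelihood instead.
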
void MLPPLogRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;
	forwardPass();

	while (true) {
		cost_prev = Cost(y_hat, outputSet);

		std::vector<real_t> error = alg.subtraction(outputSet, y_hat);

		// Calculating the weight gradients
		weights = alg.addition(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputSet), error)));
		weights = regularization.regWeights(weights, lambda, alpha, reg);

		// Calculating the bias gradients
		bias += learning_rate * alg.sum_elements(error) / n;
		forwardPass();

		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
			MLPPUtilities::UI(weights, bias);
		}
		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}
}

void MLPPLogRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	while (true) {
		std::random_device rd;
		std::default_random_engine generator(rd());
		std::uniform_int_distribution<int> distribution(0, int(n - 1));
		int outputIndex = distribution(generator);

		real_t y_hat = Evaluate(inputSet[outputIndex]);
		cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });

		real_t error = y_hat - outputSet[outputIndex];

		// Weight updation
		weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate * error, inputSet[outputIndex]));
		weights = regularization.regWeights(weights, lambda, alpha, reg);

		// Bias updation
		bias -= learning_rate * error;

		y_hat = Evaluate(inputSet[outputIndex]);

		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
			MLPPUtilities::UI(weights, bias);
		}
		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}

void MLPPLogRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error)));
			weights = regularization.regWeights(weights, lambda, alpha, reg);

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size();
			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}
		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}

real_t MLPPLogRegOld::score() {
	MLPPUtilities util;
	return util.performance(y_hat, outputSet);
}

void MLPPLogRegOld::save(std::string fileName) {
	MLPPUtilities util;
	util.saveParameters(fileName, weights, bias);
}

real_t MLPPLogRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
	MLPPReg regularization;
	class MLPPCost cost;
	return cost.LogLoss(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
}

std::vector<real_t> MLPPLogRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
	MLPPActivation avn;
	return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}

real_t MLPPLogRegOld::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
	MLPPActivation avn;
	return avn.sigmoid(alg.dot(weights, x) + bias);
}

// sigmoid ( wTx + b )
void MLPPLogRegOld::forwardPass() {
	y_hat = Evaluate(inputSet);
}
51
mlpp/log_reg/log_reg_old.h
Normal file
@ -0,0 +1,51 @@

#ifndef MLPP_LOG_REG_OLD_H
#define MLPP_LOG_REG_OLD_H

//
//  LogReg.hpp
//
//  Created by Marc Melikyan on 10/2/20.
//

#include "core/math/math_defs.h"

#include <string>
#include <vector>

class MLPPLogRegOld {
public:
	MLPPLogRegOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
	real_t modelTest(std::vector<real_t> x);
	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
	void MLE(real_t learning_rate, int max_epoch, bool UI = false);
	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
	real_t score();
	void save(std::string fileName);

private:
	real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);

	std::vector<real_t> Evaluate(std::vector<std::vector<real_t>> X);
	real_t Evaluate(std::vector<real_t> x);
	void forwardPass();

	std::vector<std::vector<real_t>> inputSet;
	std::vector<real_t> outputSet;
	std::vector<real_t> y_hat;
	std::vector<real_t> weights;
	real_t bias;

	int n;
	int k;
	real_t learning_rate;

	// Regularization Params
	std::string reg;
	real_t lambda; /* Regularization Parameter */
	real_t alpha; /* This is the controlling param for Elastic Net*/
};

#endif /* LogReg_hpp */
189
mlpp/mann/mann_old.cpp
Normal file
@ -0,0 +1,189 @@

//
//  MANN.cpp
//
//  Created by Marc Melikyan on 11/4/20.
//

#include "mann_old.h"

#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

#include <iostream>

MLPPMANNOld::MLPPMANNOld(std::vector<std::vector<real_t>> inputSet, std::vector<std::vector<real_t>> outputSet) :
		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_output(outputSet[0].size()) {
}

MLPPMANNOld::~MLPPMANNOld() {
	delete outputLayer;
}

std::vector<std::vector<real_t>> MLPPMANNOld::modelSetTest(std::vector<std::vector<real_t>> X) {
	if (!network.empty()) {
		network[0].input = X;
		network[0].forwardPass();

		for (uint32_t i = 1; i < network.size(); i++) {
			network[i].input = network[i - 1].a;
			network[i].forwardPass();
		}
		outputLayer->input = network[network.size() - 1].a;
	} else {
		outputLayer->input = X;
	}
	outputLayer->forwardPass();
	return outputLayer->a;
}

std::vector<real_t> MLPPMANNOld::modelTest(std::vector<real_t> x) {
	if (!network.empty()) {
		network[0].Test(x);
		for (uint32_t i = 1; i < network.size(); i++) {
			network[i].Test(network[i - 1].a_test);
		}
		outputLayer->Test(network[network.size() - 1].a_test);
	} else {
		outputLayer->Test(x);
	}
	return outputLayer->a_test;
}

void MLPPMANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
	class MLPPCost cost;
	MLPPActivation avn;
	MLPPLinAlg alg;
	MLPPReg regularization;

	real_t cost_prev = 0;
	int epoch = 1;
	forwardPass();

	while (true) {
		cost_prev = Cost(y_hat, outputSet);

		if (outputLayer->activation == "Softmax") {
			outputLayer->delta = alg.subtraction(y_hat, outputSet);
		} else {
			auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
			auto outputAvn = outputLayer->activation_map[outputLayer->activation];
			outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
		}

		std::vector<std::vector<real_t>> outputWGrad = alg.matmult(alg.transpose(outputLayer->input), outputLayer->delta);

		outputLayer->weights = alg.subtraction(outputLayer->weights, alg.scalarMultiply(learning_rate / n, outputWGrad));
		outputLayer->weights = regularization.regWeights(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
		outputLayer->bias = alg.subtractMatrixRows(outputLayer->bias, alg.scalarMultiply(learning_rate / n, outputLayer->delta));

		if (!network.empty()) {
			auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
			network[network.size() - 1].delta = alg.hadamard_product(alg.matmult(outputLayer->delta, alg.transpose(outputLayer->weights)), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
			std::vector<std::vector<real_t>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);

			network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, alg.scalarMultiply(learning_rate / n, hiddenLayerWGrad));
			network[network.size() - 1].weights = regularization.regWeights(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg);
			network[network.size() - 1].bias = alg.subtractMatrixRows(network[network.size() - 1].bias, alg.scalarMultiply(learning_rate / n, network[network.size() - 1].delta));

			for (int i = network.size() - 2; i >= 0; i--) {
				hiddenLayerAvn = network[i].activation_map[network[i].activation];
				network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, network[i + 1].weights), (avn.*hiddenLayerAvn)(network[i].z, 1));
				hiddenLayerWGrad = alg.matmult(alg.transpose(network[i].input), network[i].delta);
				network[i].weights = alg.subtraction(network[i].weights, alg.scalarMultiply(learning_rate / n, hiddenLayerWGrad));
				network[i].weights = regularization.regWeights(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg);
				network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate / n, network[i].delta));
			}
		}

		forwardPass();

		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
			std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
			MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);
			if (!network.empty()) {
				std::cout << "Layer " << network.size() << ": " << std::endl;
				for (int i = network.size() - 1; i >= 0; i--) {
					std::cout << "Layer " << i + 1 << ": " << std::endl;
					MLPPUtilities::UI(network[i].weights, network[i].bias);
				}
			}
		}

		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}
}

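// For reference, the backpropagation used above: with a softmax output layer and
// cross-entropy cost the output delta reduces to delta_L = y_hat - y; otherwise
// delta_L = C'(y_hat, y) (*) f'(z_L). Hidden deltas then propagate as
//   delta_l = (delta_{l+1} W_{l+1}^T) (*) f'(z_l),
// with weight gradients A_{l-1}^T delta_l, scaled here by learning_rate / n.
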
real_t MLPPMANNOld::score() {
	MLPPUtilities util;
	forwardPass();
	return util.performance(y_hat, outputSet);
}

void MLPPMANNOld::save(std::string fileName) {
	MLPPUtilities util;
	if (!network.empty()) {
		util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
		for (uint32_t i = 1; i < network.size(); i++) {
			util.saveParameters(fileName, network[i].weights, network[i].bias, 1, i + 1);
		}
		util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 1, network.size() + 1);
	} else {
		util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 0, network.size() + 1);
	}
}

void MLPPMANNOld::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
	if (network.empty()) {
		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
		network[0].forwardPass();
	} else {
		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
		network[network.size() - 1].forwardPass();
	}
}

void MLPPMANNOld::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
	if (!network.empty()) {
		outputLayer = new MLPPOldMultiOutputLayer(n_output, network[0].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
	} else {
		outputLayer = new MLPPOldMultiOutputLayer(n_output, k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
	}
}

real_t MLPPMANNOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
	MLPPReg regularization;
	class MLPPCost cost;
	real_t totalRegTerm = 0;

	auto cost_function = outputLayer->cost_map[outputLayer->cost];
	if (!network.empty()) {
		for (uint32_t i = 0; i < network.size() - 1; i++) {
			totalRegTerm += regularization.regTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg);
		}
	}
	return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
}

void MLPPMANNOld::forwardPass() {
	if (!network.empty()) {
		network[0].input = inputSet;
		network[0].forwardPass();

		for (uint32_t i = 1; i < network.size(); i++) {
			network[i].input = network[i - 1].a;
			network[i].forwardPass();
		}
		outputLayer->input = network[network.size() - 1].a;
	} else {
		outputLayer->input = inputSet;
	}
	outputLayer->forwardPass();
	y_hat = outputLayer->a;
}
51
mlpp/mann/mann_old.h
Normal file
@ -0,0 +1,51 @@

#ifndef MLPP_MANN_OLD_H
#define MLPP_MANN_OLD_H

//
//  MANN.hpp
//
//  Created by Marc Melikyan on 11/4/20.
//

#include "core/math/math_defs.h"

#include "../hidden_layer/hidden_layer.h"
#include "../multi_output_layer/multi_output_layer.h"

#include "../hidden_layer/hidden_layer_old.h"
#include "../multi_output_layer/multi_output_layer_old.h"

#include <string>
#include <vector>

class MLPPMANNOld {
public:
	MLPPMANNOld(std::vector<std::vector<real_t>> inputSet, std::vector<std::vector<real_t>> outputSet);
	~MLPPMANNOld();
	std::vector<std::vector<real_t>> modelSetTest(std::vector<std::vector<real_t>> X);
	std::vector<real_t> modelTest(std::vector<real_t> x);
	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
	real_t score();
	void save(std::string fileName);

	void addLayer(int n_hidden, std::string activation, std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
	void addOutputLayer(std::string activation, std::string loss, std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);

private:
	real_t Cost(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
	void forwardPass();

	std::vector<std::vector<real_t>> inputSet;
	std::vector<std::vector<real_t>> outputSet;
	std::vector<std::vector<real_t>> y_hat;

	std::vector<MLPPOldHiddenLayer> network;
	MLPPOldMultiOutputLayer *outputLayer;

	int n;
	int k;
	int n_output;
};

#endif /* MANN_hpp */
121
mlpp/multinomial_nb/multinomial_nb_old.cpp
Normal file
@ -0,0 +1,121 @@

//
//  MultinomialNB.cpp
//
//  Created by Marc Melikyan on 1/17/21.
//

#include "multinomial_nb_old.h"

#include "../lin_alg/lin_alg.h"
#include "../utilities/utilities.h"

#include <algorithm>
#include <iostream>
#include <random>

MLPPMultinomialNBOld::MLPPMultinomialNBOld(std::vector<std::vector<real_t>> pinputSet, std::vector<real_t> poutputSet, int pclass_num) {
	inputSet = pinputSet;
	outputSet = poutputSet;
	class_num = pclass_num;

	y_hat.resize(outputSet.size());
	Evaluate();
}

std::vector<real_t> MLPPMultinomialNBOld::modelSetTest(std::vector<std::vector<real_t>> X) {
	std::vector<real_t> y_hat;
	for (uint32_t i = 0; i < X.size(); i++) {
		y_hat.push_back(modelTest(X[i]));
	}
	return y_hat;
}

real_t MLPPMultinomialNBOld::modelTest(std::vector<real_t> x) {
	real_t score[class_num];
	computeTheta();

	for (uint32_t j = 0; j < x.size(); j++) {
		for (uint32_t k = 0; k < vocab.size(); k++) {
			if (x[j] == vocab[k]) {
				for (int p = class_num - 1; p >= 0; p--) {
					score[p] += std::log(theta[p][vocab[k]]);
				}
			}
		}
	}

	for (uint32_t i = 0; i < priors.size(); i++) {
		score[i] += std::log(priors[i]);
	}

	return std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(real_t)));
}

real_t MLPPMultinomialNBOld::score() {
	MLPPUtilities util;
	return util.performance(y_hat, outputSet);
}

void MLPPMultinomialNBOld::computeTheta() {
	// Resizing theta for the sake of ease & proper access of the elements.
	theta.resize(class_num);

	// Setting all values in the hashmap by default to 0.
	for (int i = class_num - 1; i >= 0; i--) {
		for (uint32_t j = 0; j < vocab.size(); j++) {
			theta[i][vocab[j]] = 0;
		}
	}

	for (uint32_t i = 0; i < inputSet.size(); i++) {
		for (uint32_t j = 0; j < inputSet[0].size(); j++) {
			theta[outputSet[i]][inputSet[i][j]]++;
		}
	}

	for (uint32_t i = 0; i < theta.size(); i++) {
		for (uint32_t j = 0; j < theta[i].size(); j++) {
			theta[i][j] /= priors[i] * y_hat.size();
		}
	}
}

void MLPPMultinomialNBOld::Evaluate() {
	MLPPLinAlg alg;
	for (uint32_t i = 0; i < outputSet.size(); i++) {
		// Pr(B | A) * Pr(A)
		real_t score[class_num];

		// Easy computation of priors, i.e. Pr(C_k)
		priors.resize(class_num);
		for (uint32_t ii = 0; ii < outputSet.size(); ii++) {
			priors[int(outputSet[ii])]++;
		}
		priors = alg.scalarMultiply(real_t(1) / real_t(outputSet.size()), priors);

		// Evaluating Theta...
		computeTheta();

		for (uint32_t j = 0; j < inputSet.size(); j++) {
			for (uint32_t k = 0; k < vocab.size(); k++) {
				if (inputSet[i][j] == vocab[k]) {
					for (int p = class_num - 1; p >= 0; p--) {
						score[p] += std::log(theta[i][vocab[k]]);
					}
				}
			}
		}

		for (uint32_t ii = 0; ii < priors.size(); ii++) {
			score[ii] += std::log(priors[ii]);
			score[ii] = exp(score[ii]);
		}

		for (int ii = 0; ii < 2; ii++) {
			std::cout << score[ii] << std::endl;
		}

		// Assigning the training example's y_hat to a class
		y_hat[i] = std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(real_t)));
	}
}
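
// For reference, the class score maximized above is the (unnormalized) log posterior of
// multinomial naive Bayes:
//   log Pr(C_p | x) ~ log Pr(C_p) + sum_j log theta[p][x_j],
// and each example is assigned to the class with the largest score.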
40
mlpp/multinomial_nb/multinomial_nb_old.h
Normal file
@ -0,0 +1,40 @@

#ifndef MLPP_MULTINOMIAL_NB_OLD_H
#define MLPP_MULTINOMIAL_NB_OLD_H

//
//  MultinomialNB.hpp
//
//  Created by Marc Melikyan on 1/17/21.
//

#include "core/math/math_defs.h"

#include <map>
#include <vector>

class MLPPMultinomialNBOld {
public:
	MLPPMultinomialNBOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, int class_num);
	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
	real_t modelTest(std::vector<real_t> x);
	real_t score();

private:
	void computeTheta();
	void Evaluate();

	// Model Params
	std::vector<real_t> priors;

	std::vector<std::map<real_t, int>> theta;
	std::vector<real_t> vocab;
	int class_num;

	// Datasets
	std::vector<std::vector<real_t>> inputSet;
	std::vector<real_t> outputSet;
	std::vector<real_t> y_hat;
};

#endif /* MultinomialNB_hpp */
309
mlpp/softmax_net/softmax_net_old.cpp
Normal file
@ -0,0 +1,309 @@

//
//  SoftmaxNet.cpp
//
//  Created by Marc Melikyan on 10/2/20.
//

#include "softmax_net_old.h"

#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../data/data.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

#include <iostream>
#include <random>

MLPPSoftmaxNetOld::MLPPSoftmaxNetOld(std::vector<std::vector<real_t>> pinputSet, std::vector<std::vector<real_t>> poutputSet, int pn_hidden, std::string preg, real_t plambda, real_t palpha) {
	inputSet = pinputSet;
	outputSet = poutputSet;
	n = pinputSet.size();
	k = pinputSet[0].size();
	n_hidden = pn_hidden;
	n_class = poutputSet[0].size();
	reg = preg;
	lambda = plambda;
	alpha = palpha;

	y_hat.resize(n);

	weights1 = MLPPUtilities::weightInitialization(k, n_hidden);
	weights2 = MLPPUtilities::weightInitialization(n_hidden, n_class);
	bias1 = MLPPUtilities::biasInitialization(n_hidden);
	bias2 = MLPPUtilities::biasInitialization(n_class);
}

std::vector<real_t> MLPPSoftmaxNetOld::modelTest(std::vector<real_t> x) {
	return Evaluate(x);
}

std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::modelSetTest(std::vector<std::vector<real_t>> X) {
	return Evaluate(X);
}

void MLPPSoftmaxNetOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
	MLPPActivation avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;
	forwardPass();

	while (true) {
		cost_prev = Cost(y_hat, outputSet);

		// Calculating the errors
		std::vector<std::vector<real_t>> error = alg.subtraction(y_hat, outputSet);

		// Calculating the weight/bias gradients for layer 2
		std::vector<std::vector<real_t>> D2_1 = alg.matmult(alg.transpose(a2), error);

		// weights and bias updation for layer 2
		weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate, D2_1));
		weights2 = regularization.regWeights(weights2, lambda, alpha, reg);

		bias2 = alg.subtractMatrixRows(bias2, alg.scalarMultiply(learning_rate, error));

		// Calculating the weight/bias for layer 1
		std::vector<std::vector<real_t>> D1_1 = alg.matmult(error, alg.transpose(weights2));

		std::vector<std::vector<real_t>> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));

		std::vector<std::vector<real_t>> D1_3 = alg.matmult(alg.transpose(inputSet), D1_2);

		// weight and bias updation for layer 1
		weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
		weights1 = regularization.regWeights(weights1, lambda, alpha, reg);

		bias1 = alg.subtractMatrixRows(bias1, alg.scalarMultiply(learning_rate, D1_2));

		forwardPass();

		// UI PORTION
		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
			std::cout << "Layer 1:" << std::endl;
			MLPPUtilities::UI(weights1, bias1);
			std::cout << "Layer 2:" << std::endl;
			MLPPUtilities::UI(weights2, bias2);
		}
		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
}

void MLPPSoftmaxNetOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
	MLPPActivation avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	while (true) {
		std::random_device rd;
		std::default_random_engine generator(rd());
		std::uniform_int_distribution<int> distribution(0, int(n - 1));
		int outputIndex = distribution(generator);

		std::vector<real_t> y_hat = Evaluate(inputSet[outputIndex]);

		auto prop_res = propagate(inputSet[outputIndex]);
		auto z2 = std::get<0>(prop_res);
		auto a2 = std::get<1>(prop_res);

		cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });
		std::vector<real_t> error = alg.subtraction(y_hat, outputSet[outputIndex]);

		// Weight updation for layer 2
		std::vector<std::vector<real_t>> D2_1 = alg.outerProduct(error, a2);
		weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate, alg.transpose(D2_1)));
		weights2 = regularization.regWeights(weights2, lambda, alpha, reg);

		// Bias updation for layer 2
		bias2 = alg.subtraction(bias2, alg.scalarMultiply(learning_rate, error));

		// Weight updation for layer 1
		std::vector<real_t> D1_1 = alg.mat_vec_mult(weights2, error);
		std::vector<real_t> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, true));
		std::vector<std::vector<real_t>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);

		weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
		weights1 = regularization.regWeights(weights1, lambda, alpha, reg);

		// Bias updation for layer 1
		bias1 = alg.subtraction(bias1, alg.scalarMultiply(learning_rate, D1_2));

		y_hat = Evaluate(inputSet[outputIndex]);
		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
			std::cout << "Layer 1:" << std::endl;
			MLPPUtilities::UI(weights1, bias1);
			std::cout << "Layer 2:" << std::endl;
			MLPPUtilities::UI(weights2, bias2);
		}
		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}

void MLPPSoftmaxNetOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
	MLPPActivation avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;

	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	// Creating the mini-batches
	for (int i = 0; i < n_mini_batch; i++) {
		std::vector<std::vector<real_t>> currentInputSet;
		std::vector<std::vector<real_t>> currentOutputSet;
		for (int j = 0; j < n / n_mini_batch; j++) {
			currentInputSet.push_back(inputSet[n / n_mini_batch * i + j]);
			currentOutputSet.push_back(outputSet[n / n_mini_batch * i + j]);
		}
		inputMiniBatches.push_back(currentInputSet);
		outputMiniBatches.push_back(currentOutputSet);
	}

	if (real_t(n) / real_t(n_mini_batch) - int(n / n_mini_batch) != 0) {
		for (int i = 0; i < n - n / n_mini_batch * n_mini_batch; i++) {
			inputMiniBatches[n_mini_batch - 1].push_back(inputSet[n / n_mini_batch * n_mini_batch + i]);
			outputMiniBatches[n_mini_batch - 1].push_back(outputSet[n / n_mini_batch * n_mini_batch + i]);
		}
	}

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<std::vector<real_t>> y_hat = Evaluate(inputMiniBatches[i]);

			auto propagate_res = propagate(inputMiniBatches[i]);
			auto z2 = std::get<0>(propagate_res);
			auto a2 = std::get<1>(propagate_res);

			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			// Calculating the errors
			std::vector<std::vector<real_t>> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight/bias gradients for layer 2
			std::vector<std::vector<real_t>> D2_1 = alg.matmult(alg.transpose(a2), error);

			// weights and bias updation for layer 2
			weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate, D2_1));
			weights2 = regularization.regWeights(weights2, lambda, alpha, reg);

			// Bias Updation for layer 2
			bias2 = alg.subtractMatrixRows(bias2, alg.scalarMultiply(learning_rate, error));

			// Calculating the weight/bias for layer 1
			std::vector<std::vector<real_t>> D1_1 = alg.matmult(error, alg.transpose(weights2));

			std::vector<std::vector<real_t>> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));

			std::vector<std::vector<real_t>> D1_3 = alg.matmult(alg.transpose(inputMiniBatches[i]), D1_2);

			// weight and bias updation for layer 1
			weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
			weights1 = regularization.regWeights(weights1, lambda, alpha, reg);

			bias1 = alg.subtractMatrixRows(bias1, alg.scalarMultiply(learning_rate, D1_2));

			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				std::cout << "Layer 1:" << std::endl;
				MLPPUtilities::UI(weights1, bias1);
				std::cout << "Layer 2:" << std::endl;
				MLPPUtilities::UI(weights2, bias2);
			}
		}
		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}

real_t MLPPSoftmaxNetOld::score() {
	MLPPUtilities util;
	return util.performance(y_hat, outputSet);
}

void MLPPSoftmaxNetOld::save(std::string fileName) {
	MLPPUtilities util;
	util.saveParameters(fileName, weights1, bias1, 0, 1);
	util.saveParameters(fileName, weights2, bias2, 1, 2);
}

std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::getEmbeddings() {
	return weights1;
}

real_t MLPPSoftmaxNetOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
	MLPPReg regularization;
	MLPPData data;
	class MLPPCost cost;
	return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights1, lambda, alpha, reg) + regularization.regTerm(weights2, lambda, alpha, reg);
}

std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
	MLPPActivation avn;
	std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
	std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
	return avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2));
}

std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> MLPPSoftmaxNetOld::propagate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
	MLPPActivation avn;
	std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
	std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
	return { z2, a2 };
}

std::vector<real_t> MLPPSoftmaxNetOld::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
	MLPPActivation avn;
	std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
	std::vector<real_t> a2 = avn.sigmoid(z2);
	return avn.adjSoftmax(alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2));
}

std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPSoftmaxNetOld::propagate(std::vector<real_t> x) {
	MLPPLinAlg alg;
	MLPPActivation avn;
	std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
	std::vector<real_t> a2 = avn.sigmoid(z2);
	return { z2, a2 };
}

void MLPPSoftmaxNetOld::forwardPass() {
	MLPPLinAlg alg;
	MLPPActivation avn;
	z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
	a2 = avn.sigmoid(z2);
	y_hat = avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2));
}
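
// For reference, the forward pass implemented above is the usual one-hidden-layer softmax
// network:
//   z2 = X W1 + b1,   a2 = sigmoid(z2),   y_hat = softmax(a2 W2 + b2),
// trained with cross-entropy plus the configured regularization term.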
60
mlpp/softmax_net/softmax_net_old.h
Normal file
@ -0,0 +1,60 @@

#ifndef MLPP_SOFTMAX_NET_OLD_H
#define MLPP_SOFTMAX_NET_OLD_H

//
//  SoftmaxNet.hpp
//
//  Created by Marc Melikyan on 10/2/20.
//

#include "core/math/math_defs.h"

#include <string>
#include <tuple>
#include <vector>

class MLPPSoftmaxNetOld {
public:
	MLPPSoftmaxNetOld(std::vector<std::vector<real_t>> inputSet, std::vector<std::vector<real_t>> outputSet, int n_hidden, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
	std::vector<real_t> modelTest(std::vector<real_t> x);
	std::vector<std::vector<real_t>> modelSetTest(std::vector<std::vector<real_t>> X);
	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
	real_t score();
	void save(std::string fileName);

	std::vector<std::vector<real_t>> getEmbeddings(); // This class is used (mostly) for word2Vec. This function returns our embeddings.

private:
	real_t Cost(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	std::vector<std::vector<real_t>> Evaluate(std::vector<std::vector<real_t>> X);
	std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> propagate(std::vector<std::vector<real_t>> X);
	std::vector<real_t> Evaluate(std::vector<real_t> x);
	std::tuple<std::vector<real_t>, std::vector<real_t>> propagate(std::vector<real_t> x);
	void forwardPass();

	std::vector<std::vector<real_t>> inputSet;
	std::vector<std::vector<real_t>> outputSet;
	std::vector<std::vector<real_t>> y_hat;

	std::vector<std::vector<real_t>> weights1;
	std::vector<std::vector<real_t>> weights2;

	std::vector<real_t> bias1;
	std::vector<real_t> bias2;

	std::vector<std::vector<real_t>> z2;
	std::vector<std::vector<real_t>> a2;

	int n;
	int k;
	int n_class;
	int n_hidden;

	// Regularization Params
	std::string reg;
	real_t lambda;
	real_t alpha; /* This is the controlling param for Elastic Net*/
};

#endif /* SoftmaxNet_hpp */