Prefixed LinAlg with MLPP.

Relintai 2023-01-25 00:29:02 +01:00
parent e399330a6c
commit 6fe1f32c3d
36 changed files with 350 additions and 350 deletions
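The change is mechanical: the helper class is renamed from LinAlg to MLPPLinAlg (see the lin_alg.cpp hunks below), and every local declaration of the helper is updated to match; the method calls themselves are untouched. A minimal sketch of the pattern, using a hypothetical caller and an assumed include path:

#include <vector>
#include "lin_alg.h" // assumed include path

// Hypothetical caller showing the rename repeated across all 36 files:
// only the declared type changes, the call sites stay the same.
std::vector<double> ones_like(const std::vector<double> &v) {
	MLPPLinAlg alg; // previously: LinAlg alg;
	return alg.onevec(v.size());
}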

View File

@ -19,7 +19,7 @@ double MLPPActivation::linear(double z, bool deriv) {
std::vector<double> MLPPActivation::linear(std::vector<double> z, bool deriv) {
if (deriv) {
LinAlg alg;
MLPPLinAlg alg;
return alg.onevec(z.size());
}
return z;
@ -27,7 +27,7 @@ std::vector<double> MLPPActivation::linear(std::vector<double> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::linear(std::vector<std::vector<double>> z, bool deriv) {
if (deriv) {
LinAlg alg;
MLPPLinAlg alg;
return alg.onemat(z.size(), z[0].size());
}
return z;
@ -41,7 +41,7 @@ double MLPPActivation::sigmoid(double z, bool deriv) {
}
std::vector<double> MLPPActivation::sigmoid(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z)));
}
@ -49,7 +49,7 @@ std::vector<double> MLPPActivation::sigmoid(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::sigmoid(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z)));
}
@ -57,7 +57,7 @@ std::vector<std::vector<double>> MLPPActivation::sigmoid(std::vector<std::vector
}
std::vector<double> MLPPActivation::softmax(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<double> a;
a.resize(z.size());
std::vector<double> expZ = alg.exp(z);
@ -73,7 +73,7 @@ std::vector<double> MLPPActivation::softmax(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::softmax(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> a;
a.resize(z.size());
@ -84,7 +84,7 @@ std::vector<std::vector<double>> MLPPActivation::softmax(std::vector<std::vector
}
std::vector<double> MLPPActivation::adjSoftmax(std::vector<double> z) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<double> a;
double C = -*std::max_element(z.begin(), z.end());
z = alg.scalarAdd(C, z);
@ -93,7 +93,7 @@ std::vector<double> MLPPActivation::adjSoftmax(std::vector<double> z) {
}
std::vector<std::vector<double>> MLPPActivation::adjSoftmax(std::vector<std::vector<double>> z) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> a;
a.resize(z.size());
@ -104,7 +104,7 @@ std::vector<std::vector<double>> MLPPActivation::adjSoftmax(std::vector<std::vec
}
std::vector<std::vector<double>> MLPPActivation::softmaxDeriv(std::vector<double> z) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> deriv;
std::vector<double> a = softmax(z);
deriv.resize(a.size());
@ -124,7 +124,7 @@ std::vector<std::vector<double>> MLPPActivation::softmaxDeriv(std::vector<double
}
std::vector<std::vector<std::vector<double>>> MLPPActivation::softmaxDeriv(std::vector<std::vector<double>> z) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<std::vector<double>>> deriv;
std::vector<std::vector<double>> a = softmax(z);
@ -155,7 +155,7 @@ std::vector<double> MLPPActivation::softplus(std::vector<double> z, bool deriv)
if (deriv) {
return sigmoid(z);
}
LinAlg alg;
MLPPLinAlg alg;
return alg.log(alg.addition(alg.onevec(z.size()), alg.exp(z)));
}
@ -163,7 +163,7 @@ std::vector<std::vector<double>> MLPPActivation::softplus(std::vector<std::vecto
if (deriv) {
return sigmoid(z);
}
LinAlg alg;
MLPPLinAlg alg;
return alg.log(alg.addition(alg.onemat(z.size(), z[0].size()), alg.exp(z)));
}
@ -175,7 +175,7 @@ double MLPPActivation::softsign(double z, bool deriv) {
}
std::vector<double> MLPPActivation::softsign(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.exponentiate(alg.addition(alg.onevec(z.size()), alg.abs(z)), 2));
}
@ -183,7 +183,7 @@ std::vector<double> MLPPActivation::softsign(std::vector<double> z, bool deriv)
}
std::vector<std::vector<double>> MLPPActivation::softsign(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.exponentiate(alg.addition(alg.onemat(z.size(), z[0].size()), alg.abs(z)), 2));
}
@ -198,7 +198,7 @@ double MLPPActivation::gaussianCDF(double z, bool deriv) {
}
std::vector<double> MLPPActivation::gaussianCDF(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-0.5, alg.hadamard_product(z, z))));
}
@ -206,7 +206,7 @@ std::vector<double> MLPPActivation::gaussianCDF(std::vector<double> z, bool deri
}
std::vector<std::vector<double>> MLPPActivation::gaussianCDF(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-0.5, alg.hadamard_product(z, z))));
}
@ -221,7 +221,7 @@ double MLPPActivation::cloglog(double z, bool deriv) {
}
std::vector<double> MLPPActivation::cloglog(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.exp(alg.scalarMultiply(-1, alg.exp(z)));
}
@ -229,7 +229,7 @@ std::vector<double> MLPPActivation::cloglog(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::cloglog(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.exp(alg.scalarMultiply(-1, alg.exp(z)));
}
@ -244,7 +244,7 @@ double MLPPActivation::logit(double z, bool deriv) {
}
std::vector<double> MLPPActivation::logit(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.subtraction(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(z, alg.onevec(z.size()))));
}
@ -252,7 +252,7 @@ std::vector<double> MLPPActivation::logit(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::logit(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.subtraction(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(z, alg.onemat(z.size(), z[0].size()))));
}
@ -310,7 +310,7 @@ double MLPPActivation::swish(double z, bool deriv) {
}
std::vector<double> MLPPActivation::swish(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z))));
}
@ -318,7 +318,7 @@ std::vector<double> MLPPActivation::swish(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::swish(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z))));
}
@ -333,7 +333,7 @@ double MLPPActivation::mish(double z, bool deriv) {
}
std::vector<double> MLPPActivation::mish(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z));
}
@ -341,7 +341,7 @@ std::vector<double> MLPPActivation::mish(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::mish(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z));
}
@ -356,7 +356,7 @@ double MLPPActivation::sinc(double z, bool deriv) {
}
std::vector<double> MLPPActivation::sinc(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z));
}
@ -364,7 +364,7 @@ std::vector<double> MLPPActivation::sinc(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::sinc(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z));
}
@ -662,7 +662,7 @@ std::vector<double> MLPPActivation::sinh(std::vector<double> z, bool deriv) {
if (deriv) {
return cosh(z);
}
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}
@ -670,7 +670,7 @@ std::vector<std::vector<double>> MLPPActivation::sinh(std::vector<std::vector<do
if (deriv) {
return cosh(z);
}
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}
@ -685,7 +685,7 @@ std::vector<double> MLPPActivation::cosh(std::vector<double> z, bool deriv) {
if (deriv) {
return sinh(z);
}
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}
@ -693,7 +693,7 @@ std::vector<std::vector<double>> MLPPActivation::cosh(std::vector<std::vector<do
if (deriv) {
return sinh(z);
}
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}
@ -705,7 +705,7 @@ double MLPPActivation::tanh(double z, bool deriv) {
}
std::vector<double> MLPPActivation::tanh(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z))));
}
@ -713,7 +713,7 @@ std::vector<double> MLPPActivation::tanh(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::tanh(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z))));
}
@ -729,7 +729,7 @@ double MLPPActivation::csch(double z, bool deriv) {
}
std::vector<double> MLPPActivation::csch(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z));
}
@ -737,7 +737,7 @@ std::vector<double> MLPPActivation::csch(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::csch(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z));
}
@ -752,7 +752,7 @@ double MLPPActivation::sech(double z, bool deriv) {
}
std::vector<double> MLPPActivation::sech(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z));
}
@ -762,7 +762,7 @@ std::vector<double> MLPPActivation::sech(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::sech(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z));
}
@ -779,7 +779,7 @@ double MLPPActivation::coth(double z, bool deriv) {
}
std::vector<double> MLPPActivation::coth(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z));
}
@ -787,7 +787,7 @@ std::vector<double> MLPPActivation::coth(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::coth(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z));
}
@ -802,7 +802,7 @@ double MLPPActivation::arsinh(double z, bool deriv) {
}
std::vector<double> MLPPActivation::arsinh(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onevec(z.size()))));
}
@ -810,7 +810,7 @@ std::vector<double> MLPPActivation::arsinh(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::arsinh(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size()))));
}
@ -825,7 +825,7 @@ double MLPPActivation::arcosh(double z, bool deriv) {
}
std::vector<double> MLPPActivation::arcosh(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onevec(z.size()))));
}
@ -833,7 +833,7 @@ std::vector<double> MLPPActivation::arcosh(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::arcosh(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size()))));
}
@ -848,7 +848,7 @@ double MLPPActivation::artanh(double z, bool deriv) {
}
std::vector<double> MLPPActivation::artanh(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z)));
}
@ -856,7 +856,7 @@ std::vector<double> MLPPActivation::artanh(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::artanh(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)));
}
@ -871,7 +871,7 @@ double MLPPActivation::arcsch(double z, bool deriv) {
}
std::vector<double> MLPPActivation::arcsch(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onevec(z.size()), alg.elementWiseDivision(alg.onevec(z.size()), alg.hadamard_product(z, z))))));
}
@ -879,7 +879,7 @@ std::vector<double> MLPPActivation::arcsch(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::arcsch(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onemat(z.size(), z[0].size()), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))))));
}
@ -894,7 +894,7 @@ double MLPPActivation::arsech(double z, bool deriv) {
}
std::vector<double> MLPPActivation::arsech(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z)))));
}
@ -902,7 +902,7 @@ std::vector<double> MLPPActivation::arsech(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::arsech(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)))));
}
@ -917,7 +917,7 @@ double MLPPActivation::arcoth(double z, bool deriv) {
}
std::vector<double> MLPPActivation::arcoth(std::vector<double> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z)));
}
@ -925,7 +925,7 @@ std::vector<double> MLPPActivation::arcoth(std::vector<double> z, bool deriv) {
}
std::vector<std::vector<double>> MLPPActivation::arcoth(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
MLPPLinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)));
}

View File

@ -55,7 +55,7 @@ double MLPPANN::modelTest(std::vector<double> x) {
void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
forwardPass();
@ -89,7 +89,7 @@ void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -127,7 +127,7 @@ void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) {
void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -165,7 +165,7 @@ void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo
void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -222,7 +222,7 @@ void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size,
void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -278,7 +278,7 @@ void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size,
void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -334,7 +334,7 @@ void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size,
void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -401,7 +401,7 @@ void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, dou
void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -466,7 +466,7 @@ void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, d
void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -536,7 +536,7 @@ void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, do
void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -661,7 +661,7 @@ void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightI
}
void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
LinAlg alg;
MLPPLinAlg alg;
if (!network.empty()) {
outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
} else {
@ -701,7 +701,7 @@ void MLPPANN::forwardPass() {
}
void MLPPANN::updateParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
LinAlg alg;
MLPPLinAlg alg;
outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
@ -721,7 +721,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> M
// std::cout << "BEGIN" << std::endl;
class MLPPCost cost;
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.

View File

@ -34,7 +34,7 @@ std::vector<double> MLPPAutoEncoder::modelTest(std::vector<double> x) {
void MLPPAutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
forwardPass();
@ -87,7 +87,7 @@ void MLPPAutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool
void MLPPAutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -138,7 +138,7 @@ void MLPPAutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -213,7 +213,7 @@ double MLPPAutoEncoder::Cost(std::vector<std::vector<double>> y_hat, std::vector
}
std::vector<std::vector<double>> MLPPAutoEncoder::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
@ -221,7 +221,7 @@ std::vector<std::vector<double>> MLPPAutoEncoder::Evaluate(std::vector<std::vect
}
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPAutoEncoder::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
@ -229,7 +229,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> M
}
std::vector<double> MLPPAutoEncoder::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
@ -237,7 +237,7 @@ std::vector<double> MLPPAutoEncoder::Evaluate(std::vector<double> x) {
}
std::tuple<std::vector<double>, std::vector<double>> MLPPAutoEncoder::propagate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
@ -245,7 +245,7 @@ std::tuple<std::vector<double>, std::vector<double>> MLPPAutoEncoder::propagate(
}
void MLPPAutoEncoder::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
a2 = avn.sigmoid(z2);

View File

@ -74,7 +74,7 @@ double MLPPBernoulliNB::score() {
}
void MLPPBernoulliNB::computeVocab() {
LinAlg alg;
MLPPLinAlg alg;
MLPPData data;
vocab = data.vecToSet<double>(alg.flatten(inputSet));
}

View File

@ -31,7 +31,7 @@ double MLPPCLogLogReg::modelTest(std::vector<double> x) {
void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -65,7 +65,7 @@ void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool U
void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -96,7 +96,7 @@ void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
}
void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -138,7 +138,7 @@ void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -191,30 +191,30 @@ double MLPPCLogLogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
}
std::vector<double> MLPPCLogLogReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
std::vector<double> MLPPCLogLogReg::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}
double MLPPCLogLogReg::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.cloglog(alg.dot(weights, x) + bias);
}
double MLPPCLogLogReg::propagate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
return alg.dot(weights, x) + bias;
}
// cloglog ( wTx + b )
void MLPPCLogLogReg::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z = propagate(inputSet);

View File

@ -15,7 +15,7 @@ MLPPConvolutions::MLPPConvolutions() :
}
std::vector<std::vector<double>> MLPPConvolutions::convolve(std::vector<std::vector<double>> input, std::vector<std::vector<double>> filter, int S, int P) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> featureMap;
int N = input.size();
int F = filter.size();
@ -71,7 +71,7 @@ std::vector<std::vector<double>> MLPPConvolutions::convolve(std::vector<std::vec
}
std::vector<std::vector<std::vector<double>>> MLPPConvolutions::convolve(std::vector<std::vector<std::vector<double>>> input, std::vector<std::vector<std::vector<double>>> filter, int S, int P) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<std::vector<double>>> featureMap;
int N = input[0].size();
int F = filter[0].size();
@ -137,7 +137,7 @@ std::vector<std::vector<std::vector<double>>> MLPPConvolutions::convolve(std::ve
}
std::vector<std::vector<double>> MLPPConvolutions::pool(std::vector<std::vector<double>> input, int F, int S, std::string type) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> pooledMap;
int N = input.size();
int mapSize = floor((N - F) / S + 1);
@ -185,7 +185,7 @@ std::vector<std::vector<std::vector<double>>> MLPPConvolutions::pool(std::vector
}
double MLPPConvolutions::globalPool(std::vector<std::vector<double>> input, std::string type) {
LinAlg alg;
MLPPLinAlg alg;
if (type == "Average") {
Stat stat;
return stat.mean(alg.flatten(input));
@ -272,7 +272,7 @@ std::vector<std::vector<double>> MLPPConvolutions::dy(std::vector<std::vector<do
}
std::vector<std::vector<double>> MLPPConvolutions::gradMagnitude(std::vector<std::vector<double>> input) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> xDeriv_2 = alg.hadamard_product(dx(input), dx(input));
std::vector<std::vector<double>> yDeriv_2 = alg.hadamard_product(dy(input), dy(input));
return alg.sqrt(alg.addition(xDeriv_2, yDeriv_2));
@ -301,7 +301,7 @@ std::vector<std::vector<std::vector<double>>> MLPPConvolutions::computeM(std::ve
double const GAUSSIAN_PADDING = ((input.size() - 1) + GAUSSIAN_SIZE - input.size()) / 2; // Convolution must be 'same', i.e. output size equals input size.
std::cout << GAUSSIAN_PADDING << std::endl;
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> xDeriv = dx(input);
std::vector<std::vector<double>> yDeriv = dy(input);
@ -315,7 +315,7 @@ std::vector<std::vector<std::vector<double>>> MLPPConvolutions::computeM(std::ve
}
std::vector<std::vector<std::string>> MLPPConvolutions::harrisCornerDetection(std::vector<std::vector<double>> input) {
double const k = 0.05; // Empirically determined; k typically lies in [0.04, 0.06], with 0.05 being the conventional choice.
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<std::vector<double>>> M = computeM(input);
std::vector<std::vector<double>> det = alg.subtraction(alg.hadamard_product(M[0], M[1]), alg.hadamard_product(M[2], M[2]));
std::vector<std::vector<double>> trace = alg.addition(M[0], M[1]);

View File

@ -30,12 +30,12 @@ double MLPPCost::MSE(std::vector<std::vector<double>> y_hat, std::vector<std::ve
}
std::vector<double> MLPPCost::MSEDeriv(std::vector<double> y_hat, std::vector<double> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.subtraction(y_hat, y);
}
std::vector<std::vector<double>> MLPPCost::MSEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.subtraction(y_hat, y);
}
@ -58,12 +58,12 @@ double MLPPCost::RMSE(std::vector<std::vector<double>> y_hat, std::vector<std::v
}
std::vector<double> MLPPCost::RMSEDeriv(std::vector<double> y_hat, std::vector<double> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarMultiply(1 / (2 * sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y));
}
std::vector<std::vector<double>> MLPPCost::RMSEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarMultiply(1 / (2 * sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y));
}
@ -139,12 +139,12 @@ double MLPPCost::MBE(std::vector<std::vector<double>> y_hat, std::vector<std::ve
}
std::vector<double> MLPPCost::MBEDeriv(std::vector<double> y_hat, std::vector<double> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.onevec(y_hat.size());
}
std::vector<std::vector<double>> MLPPCost::MBEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.onemat(y_hat.size(), y_hat[0].size());
}
@ -171,12 +171,12 @@ double MLPPCost::LogLoss(std::vector<std::vector<double>> y_hat, std::vector<std
}
std::vector<double> MLPPCost::LogLossDeriv(std::vector<double> y_hat, std::vector<double> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat))));
}
std::vector<std::vector<double>> MLPPCost::LogLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat))));
}
@ -201,17 +201,17 @@ double MLPPCost::CrossEntropy(std::vector<std::vector<double>> y_hat, std::vecto
}
std::vector<double> MLPPCost::CrossEntropyDeriv(std::vector<double> y_hat, std::vector<double> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat));
}
std::vector<std::vector<double>> MLPPCost::CrossEntropyDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat));
}
double MLPPCost::HuberLoss(std::vector<double> y_hat, std::vector<double> y, double delta) {
LinAlg alg;
MLPPLinAlg alg;
double sum = 0;
for (int i = 0; i < y_hat.size(); i++) {
if (abs(y[i] - y_hat[i]) <= delta) {
@ -224,7 +224,7 @@ double MLPPCost::HuberLoss(std::vector<double> y_hat, std::vector<double> y, dou
}
double MLPPCost::HuberLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, double delta) {
LinAlg alg;
MLPPLinAlg alg;
double sum = 0;
for (int i = 0; i < y_hat.size(); i++) {
for (int j = 0; j < y_hat[i].size(); j++) {
@ -239,7 +239,7 @@ double MLPPCost::HuberLoss(std::vector<std::vector<double>> y_hat, std::vector<s
}
std::vector<double> MLPPCost::HuberLossDeriv(std::vector<double> y_hat, std::vector<double> y, double delta) {
LinAlg alg;
MLPPLinAlg alg;
double sum = 0;
std::vector<double> deriv;
deriv.resize(y_hat.size());
@ -259,7 +259,7 @@ std::vector<double> MLPPCost::HuberLossDeriv(std::vector<double> y_hat, std::vec
}
std::vector<std::vector<double>> MLPPCost::HuberLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, double delta) {
LinAlg alg;
MLPPLinAlg alg;
double sum = 0;
std::vector<std::vector<double>> deriv;
deriv.resize(y_hat.size());
@ -349,39 +349,39 @@ double MLPPCost::WassersteinLoss(std::vector<std::vector<double>> y_hat, std::ve
}
std::vector<double> MLPPCost::WassersteinLossDeriv(std::vector<double> y_hat, std::vector<double> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarMultiply(-1, y); // Simple.
}
std::vector<std::vector<double>> MLPPCost::WassersteinLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarMultiply(-1, y); // Simple.
}
double MLPPCost::HingeLoss(std::vector<double> y_hat, std::vector<double> y, std::vector<double> weights, double C) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge");
}
double MLPPCost::HingeLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, std::vector<std::vector<double>> weights, double C) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge");
}
std::vector<double> MLPPCost::HingeLossDeriv(std::vector<double> y_hat, std::vector<double> y, double C) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y));
}
std::vector<std::vector<double>> MLPPCost::HingeLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, double C) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y));
}
double MLPPCost::dualFormSVM(std::vector<double> alpha, std::vector<std::vector<double>> X, std::vector<double> y) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> Y = alg.diag(y); // Y is a diagonal matrix. Y[i][j] = y[i] if i == j, else Y[i][j] = 0. Yt = Y.
std::vector<std::vector<double>> K = alg.matmult(X, alg.transpose(X)); // TO DO: DON'T forget to add non-linear kernelizations.
std::vector<std::vector<double>> Q = alg.matmult(alg.matmult(alg.transpose(Y), K), Y);
@ -392,7 +392,7 @@ double MLPPCost::dualFormSVM(std::vector<double> alpha, std::vector<std::vector<
}
std::vector<double> MLPPCost::dualFormSVMDeriv(std::vector<double> alpha, std::vector<std::vector<double>> X, std::vector<double> y) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> Y = alg.zeromat(y.size(), y.size());
for (int i = 0; i < y.size(); i++) {
Y[i][i] = y[i]; // Y is a diagonal matrix. Y[i][j] = y[i] if i == j, else Y[i][j] = 0. Yt = Y.

View File

@ -126,7 +126,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>, s
// MULTIVARIATE SUPERVISED
void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<double>> &inputSet, std::vector<double> &outputSet) {
LinAlg alg;
MLPPLinAlg alg;
std::string inputTemp;
std::string outputTemp;
@ -154,7 +154,7 @@ void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<doub
}
void MLPPData::printData(std::vector<std::string> inputName, std::string outputName, std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) {
LinAlg alg;
MLPPLinAlg alg;
inputSet = alg.transpose(inputSet);
for (int i = 0; i < inputSet.size(); i++) {
std::cout << inputName[i] << std::endl;
@ -172,7 +172,7 @@ void MLPPData::printData(std::vector<std::string> inputName, std::string outputN
// UNSUPERVISED
void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<double>> &inputSet) {
LinAlg alg;
MLPPLinAlg alg;
std::string inputTemp;
inputSet.resize(k);
@ -196,7 +196,7 @@ void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<doub
}
void MLPPData::printData(std::vector<std::string> inputName, std::vector<std::vector<double>> inputSet) {
LinAlg alg;
MLPPLinAlg alg;
inputSet = alg.transpose(inputSet);
for (int i = 0; i < inputSet.size(); i++) {
std::cout << inputName[i] << std::endl;
@ -259,7 +259,7 @@ std::vector<std::vector<double>> MLPPData::rgb2gray(std::vector<std::vector<std:
}
std::vector<std::vector<std::vector<double>>> MLPPData::rgb2ycbcr(std::vector<std::vector<std::vector<double>>> input) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<std::vector<double>>> YCbCr;
YCbCr = alg.resize(YCbCr, input);
for (int i = 0; i < YCbCr[0].size(); i++) {
@ -275,7 +275,7 @@ std::vector<std::vector<std::vector<double>>> MLPPData::rgb2ycbcr(std::vector<st
// Conversion formulas available here:
// https://www.rapidtables.com/convert/color/rgb-to-hsv.html
std::vector<std::vector<std::vector<double>>> MLPPData::rgb2hsv(std::vector<std::vector<std::vector<double>>> input) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<std::vector<double>>> HSV;
HSV = alg.resize(HSV, input);
for (int i = 0; i < HSV[0].size(); i++) {
@ -317,7 +317,7 @@ std::vector<std::vector<std::vector<double>>> MLPPData::rgb2hsv(std::vector<std:
// http://machinethatsees.blogspot.com/2013/07/how-to-convert-rgb-to-xyz-or-vice-versa.html
std::vector<std::vector<std::vector<double>>> MLPPData::rgb2xyz(std::vector<std::vector<std::vector<double>>> input) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<std::vector<double>>> XYZ;
XYZ = alg.resize(XYZ, input);
std::vector<std::vector<double>> RGB2XYZ = { { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } };
@ -325,7 +325,7 @@ std::vector<std::vector<std::vector<double>>> MLPPData::rgb2xyz(std::vector<std:
}
std::vector<std::vector<std::vector<double>>> MLPPData::xyz2rgb(std::vector<std::vector<std::vector<double>>> input) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<std::vector<double>>> XYZ;
XYZ = alg.resize(XYZ, input);
std::vector<std::vector<double>> RGB2XYZ = alg.inverse({ { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } });
@ -520,7 +520,7 @@ std::vector<std::vector<double>> MLPPData::BOW(std::vector<std::string> sentence
}
std::vector<std::vector<double>> MLPPData::TFIDF(std::vector<std::string> sentences) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::string> wordList = removeNullByte(removeStopWords(createWordList(sentences)));
std::vector<std::vector<std::string>> segmented_sentences;
@ -620,7 +620,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::string>> MLPPData:
for (int i = inputSize; i < BOW.size(); i++) {
outputSet.push_back(BOW[i]);
}
LinAlg alg;
MLPPLinAlg alg;
SoftmaxNet *model;
if (type == "Skipgram") {
model = new SoftmaxNet(outputSet, inputSet, dimension);
@ -635,7 +635,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::string>> MLPPData:
}
std::vector<std::vector<double>> MLPPData::LSA(std::vector<std::string> sentences, int dim) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> docWordData = BOW(sentences, "Binary");
auto [U, S, Vt] = alg.SVD(docWordData);
@ -678,7 +678,7 @@ void MLPPData::setInputNames(std::string fileName, std::vector<std::string> &inp
}
std::vector<std::vector<double>> MLPPData::featureScaling(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
X = alg.transpose(X);
std::vector<double> max_elements, min_elements;
max_elements.resize(X.size());
@ -698,7 +698,7 @@ std::vector<std::vector<double>> MLPPData::featureScaling(std::vector<std::vecto
}
std::vector<std::vector<double>> MLPPData::meanNormalization(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
Stat stat;
// (X_j - mu_j) / std_j, for every j
@ -710,7 +710,7 @@ std::vector<std::vector<double>> MLPPData::meanNormalization(std::vector<std::ve
}
std::vector<std::vector<double>> MLPPData::meanCentering(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
Stat stat;
for (int i = 0; i < X.size(); i++) {
double mean_i = stat.mean(X[i]);

View File

@ -34,7 +34,7 @@ double MLPPDualSVC::modelTest(std::vector<double> x) {
void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -82,7 +82,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI)
// void MLPPDualSVC::SGD(double learning_rate, int max_epoch, bool UI){
// class MLPPCost cost;
// MLPPActivation avn;
// LinAlg alg;
// MLPPLinAlg alg;
// Reg regularization;
// double cost_prev = 0;
@ -115,7 +115,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI)
// void MLPPDualSVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
// class MLPPCost cost;
// MLPPActivation avn;
// LinAlg alg;
// MLPPLinAlg alg;
// Reg regularization;
// double cost_prev = 0;
// int epoch = 1;
@ -173,7 +173,7 @@ std::vector<double> MLPPDualSVC::Evaluate(std::vector<std::vector<double>> X) {
}
std::vector<double> MLPPDualSVC::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<double> z;
for (int i = 0; i < X.size(); i++) {
double sum = 0;
@ -194,7 +194,7 @@ double MLPPDualSVC::Evaluate(std::vector<double> x) {
}
double MLPPDualSVC::propagate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
double z = 0;
for (int j = 0; j < alpha.size(); j++) {
if (alpha[j] != 0) {
@ -206,7 +206,7 @@ double MLPPDualSVC::propagate(std::vector<double> x) {
}
void MLPPDualSVC::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z = propagate(inputSet);
@ -224,14 +224,14 @@ void MLPPDualSVC::alphaProjection() {
}
double MLPPDualSVC::kernelFunction(std::vector<double> u, std::vector<double> v, std::string kernel) {
LinAlg alg;
MLPPLinAlg alg;
if (kernel == "Linear") {
return alg.dot(u, v);
} // warning: non-void function does not return a value in all control paths [-Wreturn-type]
}
std::vector<std::vector<double>> MLPPDualSVC::kernelFunction(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B, std::string kernel) {
LinAlg alg;
MLPPLinAlg alg;
if (kernel == "Linear") {
return alg.matmult(inputSet, alg.transpose(inputSet));
} // warning: non-void function does not return a value in all control paths [-Wreturn-type]

View File

@ -32,7 +32,7 @@ double MLPPExpReg::modelTest(std::vector<double> x) {
}
void MLPPExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -135,7 +135,7 @@ void MLPPExpReg::SGD(double learning_rate, int max_epoch, bool UI) {
}
void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;

View File

@ -24,13 +24,13 @@ MLPPGAN::~MLPPGAN() {
}
std::vector<std::vector<double>> MLPPGAN::generateExample(int n) {
LinAlg alg;
MLPPLinAlg alg;
return modelSetTestGenerator(alg.gaussianNoise(n, k));
}
void MLPPGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
forwardPass();
@ -77,7 +77,7 @@ void MLPPGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
double MLPPGAN::score() {
LinAlg alg;
MLPPLinAlg alg;
Utilities util;
forwardPass();
return util.performance(y_hat, alg.onevec(n));
@ -97,7 +97,7 @@ void MLPPGAN::save(std::string fileName) {
}
void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
LinAlg alg;
MLPPLinAlg alg;
if (network.empty()) {
network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
network[0].forwardPass();
@ -108,7 +108,7 @@ void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightI
}
void MLPPGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) {
LinAlg alg;
MLPPLinAlg alg;
if (!network.empty()) {
outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
} else {
@ -160,7 +160,7 @@ double MLPPGAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
}
void MLPPGAN::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
if (!network.empty()) {
network[0].input = alg.gaussianNoise(n, k);
network[0].forwardPass();
@ -178,7 +178,7 @@ void MLPPGAN::forwardPass() {
}
void MLPPGAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
LinAlg alg;
MLPPLinAlg alg;
outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
@ -195,7 +195,7 @@ void MLPPGAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<
}
void MLPPGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, double learning_rate) {
LinAlg alg;
MLPPLinAlg alg;
if (!network.empty()) {
for (int i = network.size() / 2; i >= 0; i--) {
@ -210,7 +210,7 @@ void MLPPGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<doub
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> MLPPGAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
class MLPPCost cost;
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
@ -246,7 +246,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> M
std::vector<std::vector<std::vector<double>>> MLPPGAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
class MLPPCost cost;
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.

View File

@ -18,7 +18,7 @@ MLPPGaussianNB::MLPPGaussianNB(std::vector<std::vector<double>> inputSet, std::v
inputSet(inputSet), outputSet(outputSet), class_num(class_num) {
y_hat.resize(outputSet.size());
Evaluate();
LinAlg alg;
MLPPLinAlg alg;
}
std::vector<double> MLPPGaussianNB::modelSetTest(std::vector<std::vector<double>> X) {
@ -31,7 +31,7 @@ std::vector<double> MLPPGaussianNB::modelSetTest(std::vector<std::vector<double>
double MLPPGaussianNB::modelTest(std::vector<double> x) {
Stat stat;
LinAlg alg;
MLPPLinAlg alg;
double score[class_num];
double y_hat_i = 1;
@ -49,7 +49,7 @@ double MLPPGaussianNB::score() {
void MLPPGaussianNB::Evaluate() {
Stat stat;
LinAlg alg;
MLPPLinAlg alg;
// Computing mu_k_y and sigma_k_y
mu.resize(class_num);

View File

@ -98,14 +98,14 @@ MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vect
}
void MLPPHiddenLayer::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z = alg.mat_vec_add(alg.matmult(input, weights), bias);
a = (avn.*activation_map[activation])(z, 0);
}
void MLPPHiddenLayer::Test(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
a_test = (avn.*activationTest_map[activation])(z_test, 0);

View File

@ -23,7 +23,7 @@ MLPPKMeans::MLPPKMeans(std::vector<std::vector<double>> inputSet, int k, std::st
}
std::vector<std::vector<double>> MLPPKMeans::modelSetTest(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> closestCentroids;
for (int i = 0; i < inputSet.size(); i++) {
std::vector<double> closestCentroid = mu[0];
@ -39,7 +39,7 @@ std::vector<std::vector<double>> MLPPKMeans::modelSetTest(std::vector<std::vecto
}
std::vector<double> MLPPKMeans::modelTest(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<double> closestCentroid = mu[0];
for (int j = 0; j < mu.size(); j++) {
if (alg.euclideanDistance(x, mu[j]) < alg.euclideanDistance(x, closestCentroid)) {
@ -85,7 +85,7 @@ double MLPPKMeans::score() {
}
std::vector<double> MLPPKMeans::silhouette_scores() {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> closestCentroids = modelSetTest(inputSet);
std::vector<double> silhouette_scores;
for (int i = 0; i < inputSet.size(); i++) {
@ -136,7 +136,7 @@ std::vector<double> MLPPKMeans::silhouette_scores() {
// This simply computes r_nk
void MLPPKMeans::Evaluate() {
LinAlg alg;
MLPPLinAlg alg;
r.resize(inputSet.size());
for (int i = 0; i < r.size(); i++) {
@ -163,7 +163,7 @@ void MLPPKMeans::Evaluate() {
// This simply computes or re-computes mu_k
void MLPPKMeans::computeMu() {
LinAlg alg;
MLPPLinAlg alg;
for (int i = 0; i < mu.size(); i++) {
std::vector<double> num;
num.resize(r.size());
@ -197,7 +197,7 @@ void MLPPKMeans::centroidInitialization(int k) {
}
void MLPPKMeans::kmeansppInitialization(int k) {
LinAlg alg;
MLPPLinAlg alg;
std::random_device rd;
std::default_random_engine generator(rd());
std::uniform_int_distribution<int> distribution(0, int(inputSet.size() - 1));
@ -223,7 +223,7 @@ void MLPPKMeans::kmeansppInitialization(int k) {
}
double MLPPKMeans::Cost() {
LinAlg alg;
MLPPLinAlg alg;
double sum = 0;
for (int i = 0; i < r.size(); i++) {
for (int j = 0; j < r[0].size(); j++) {

View File

@ -63,7 +63,7 @@ int MLPPKNN::determineClass(std::vector<double> knn) {
}
std::vector<double> MLPPKNN::nearestNeighbors(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
// The nearest neighbors
std::vector<double> knn;

View File

@ -13,18 +13,18 @@
std::vector<std::vector<double>> LinAlg::gramMatrix(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::gramMatrix(std::vector<std::vector<double>> A) {
return matmult(transpose(A), A); // AtA
}
bool LinAlg::linearIndependenceChecker(std::vector<std::vector<double>> A) {
bool MLPPLinAlg::linearIndependenceChecker(std::vector<std::vector<double>> A) {
if (det(gramMatrix(A), A.size()) == 0) {
return false;
}
return true;
}
std::vector<std::vector<double>> LinAlg::gaussianNoise(int n, int m) {
std::vector<std::vector<double>> MLPPLinAlg::gaussianNoise(int n, int m) {
std::random_device rd;
std::default_random_engine generator(rd());
@ -40,7 +40,7 @@ std::vector<std::vector<double>> LinAlg::gaussianNoise(int n, int m) {
return A;
}
std::vector<std::vector<double>> LinAlg::addition(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> MLPPLinAlg::addition(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> C;
C.resize(A.size());
for (int i = 0; i < C.size(); i++) {
@ -55,7 +55,7 @@ std::vector<std::vector<double>> LinAlg::addition(std::vector<std::vector<double
return C;
}
std::vector<std::vector<double>> LinAlg::subtraction(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> MLPPLinAlg::subtraction(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> C;
C.resize(A.size());
for (int i = 0; i < C.size(); i++) {
@ -70,7 +70,7 @@ std::vector<std::vector<double>> LinAlg::subtraction(std::vector<std::vector<dou
return C;
}
std::vector<std::vector<double>> LinAlg::matmult(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> MLPPLinAlg::matmult(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> C;
C.resize(A.size());
for (int i = 0; i < C.size(); i++) {
@ -87,7 +87,7 @@ std::vector<std::vector<double>> LinAlg::matmult(std::vector<std::vector<double>
return C;
}
std::vector<std::vector<double>> LinAlg::hadamard_product(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> MLPPLinAlg::hadamard_product(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> C;
C.resize(A.size());
for (int i = 0; i < C.size(); i++) {
@ -102,7 +102,7 @@ std::vector<std::vector<double>> LinAlg::hadamard_product(std::vector<std::vecto
return C;
}
std::vector<std::vector<double>> LinAlg::kronecker_product(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> MLPPLinAlg::kronecker_product(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> C;
// [1,1,1,1] [1,2,3,4,5]
@ -131,7 +131,7 @@ std::vector<std::vector<double>> LinAlg::kronecker_product(std::vector<std::vect
return C;
}
std::vector<std::vector<double>> LinAlg::elementWiseDivision(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> MLPPLinAlg::elementWiseDivision(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> C;
C.resize(A.size());
for (int i = 0; i < C.size(); i++) {
@ -145,7 +145,7 @@ std::vector<std::vector<double>> LinAlg::elementWiseDivision(std::vector<std::ve
return C;
}
std::vector<std::vector<double>> LinAlg::transpose(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::transpose(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> AT;
AT.resize(A[0].size());
for (int i = 0; i < AT.size(); i++) {
@ -160,7 +160,7 @@ std::vector<std::vector<double>> LinAlg::transpose(std::vector<std::vector<doubl
return AT;
}
std::vector<std::vector<double>> LinAlg::scalarMultiply(double scalar, std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::scalarMultiply(double scalar, std::vector<std::vector<double>> A) {
for (int i = 0; i < A.size(); i++) {
for (int j = 0; j < A[i].size(); j++) {
A[i][j] *= scalar;
@ -169,7 +169,7 @@ std::vector<std::vector<double>> LinAlg::scalarMultiply(double scalar, std::vect
return A;
}
std::vector<std::vector<double>> LinAlg::scalarAdd(double scalar, std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::scalarAdd(double scalar, std::vector<std::vector<double>> A) {
for (int i = 0; i < A.size(); i++) {
for (int j = 0; j < A[i].size(); j++) {
A[i][j] += scalar;
@ -178,7 +178,7 @@ std::vector<std::vector<double>> LinAlg::scalarAdd(double scalar, std::vector<st
return A;
}
std::vector<std::vector<double>> LinAlg::log(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::log(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> B;
B.resize(A.size());
for (int i = 0; i < B.size(); i++) {
@ -192,7 +192,7 @@ std::vector<std::vector<double>> LinAlg::log(std::vector<std::vector<double>> A)
return B;
}
std::vector<std::vector<double>> LinAlg::log10(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::log10(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> B;
B.resize(A.size());
for (int i = 0; i < B.size(); i++) {
@ -206,7 +206,7 @@ std::vector<std::vector<double>> LinAlg::log10(std::vector<std::vector<double>>
return B;
}
std::vector<std::vector<double>> LinAlg::exp(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::exp(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> B;
B.resize(A.size());
for (int i = 0; i < B.size(); i++) {
@ -220,7 +220,7 @@ std::vector<std::vector<double>> LinAlg::exp(std::vector<std::vector<double>> A)
return B;
}
std::vector<std::vector<double>> LinAlg::erf(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::erf(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> B;
B.resize(A.size());
for (int i = 0; i < B.size(); i++) {
@ -234,7 +234,7 @@ std::vector<std::vector<double>> LinAlg::erf(std::vector<std::vector<double>> A)
return B;
}
std::vector<std::vector<double>> LinAlg::exponentiate(std::vector<std::vector<double>> A, double p) {
std::vector<std::vector<double>> MLPPLinAlg::exponentiate(std::vector<std::vector<double>> A, double p) {
for (int i = 0; i < A.size(); i++) {
for (int j = 0; j < A[i].size(); j++) {
A[i][j] = std::pow(A[i][j], p);
@ -243,15 +243,15 @@ std::vector<std::vector<double>> LinAlg::exponentiate(std::vector<std::vector<do
return A;
}
std::vector<std::vector<double>> LinAlg::sqrt(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::sqrt(std::vector<std::vector<double>> A) {
return exponentiate(A, 0.5);
}
std::vector<std::vector<double>> LinAlg::cbrt(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::cbrt(std::vector<std::vector<double>> A) {
return exponentiate(A, double(1) / double(3));
}
std::vector<std::vector<double>> LinAlg::matrixPower(std::vector<std::vector<double>> A, int n) {
std::vector<std::vector<double>> MLPPLinAlg::matrixPower(std::vector<std::vector<double>> A, int n) {
std::vector<std::vector<double>> B = identity(A.size());
if (n == 0) {
return identity(A.size());
@ -264,7 +264,7 @@ std::vector<std::vector<double>> LinAlg::matrixPower(std::vector<std::vector<dou
return B;
}
std::vector<std::vector<double>> LinAlg::abs(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::abs(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> B;
B.resize(A.size());
for (int i = 0; i < B.size(); i++) {
@ -278,7 +278,7 @@ std::vector<std::vector<double>> LinAlg::abs(std::vector<std::vector<double>> A)
return B;
}
double LinAlg::det(std::vector<std::vector<double>> A, int d) {
double MLPPLinAlg::det(std::vector<std::vector<double>> A, int d) {
double deter = 0;
std::vector<std::vector<double>> B;
B.resize(d);
@ -313,7 +313,7 @@ double LinAlg::det(std::vector<std::vector<double>> A, int d) {
return deter;
}
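A minimal usage sketch (not part of this commit) of det(), which performs a cofactor (Laplace) expansion; the include path "lin_alg.h" is an assumption and should be adjusted to this module's actual header.
#include "lin_alg.h" // assumed header name
#include <iostream>
#include <vector>

int main() {
	MLPPLinAlg alg;
	std::vector<std::vector<double>> A = { { 1, 2 }, { 3, 4 } };
	// d is the order of the matrix, as passed elsewhere in the library.
	std::cout << alg.det(A, int(A.size())) << std::endl; // expected: -2
	return 0;
}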
double LinAlg::trace(std::vector<std::vector<double>> A) {
double MLPPLinAlg::trace(std::vector<std::vector<double>> A) {
double trace = 0;
for (int i = 0; i < A.size(); i++) {
trace += A[i][i];
@ -321,7 +321,7 @@ double LinAlg::trace(std::vector<std::vector<double>> A) {
return trace;
}
std::vector<std::vector<double>> LinAlg::cofactor(std::vector<std::vector<double>> A, int n, int i, int j) {
std::vector<std::vector<double>> MLPPLinAlg::cofactor(std::vector<std::vector<double>> A, int n, int i, int j) {
std::vector<std::vector<double>> cof;
cof.resize(A.size());
for (int i = 0; i < cof.size(); i++) {
@ -344,7 +344,7 @@ std::vector<std::vector<double>> LinAlg::cofactor(std::vector<std::vector<double
return cof;
}
std::vector<std::vector<double>> LinAlg::adjoint(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::adjoint(std::vector<std::vector<double>> A) {
// Resizing the initial adjoint matrix
std::vector<std::vector<double>> adj;
adj.resize(A.size());
@ -379,16 +379,16 @@ std::vector<std::vector<double>> LinAlg::adjoint(std::vector<std::vector<double>
}
// The inverse can be computed as (1 / determinant(A)) * adjoint(A)
std::vector<std::vector<double>> LinAlg::inverse(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::inverse(std::vector<std::vector<double>> A) {
return scalarMultiply(1 / det(A, int(A.size())), adjoint(A));
}
// This is simply the Moore-Penrose least squares approximation of the inverse.
std::vector<std::vector<double>> LinAlg::pinverse(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::pinverse(std::vector<std::vector<double>> A) {
return matmult(inverse(matmult(transpose(A), A)), transpose(A));
}
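A minimal sketch (not part of this commit) of how inverse() and pinverse() might be called: inverse() implements adj(A) / det(A), and pinverse() is the left pseudoinverse (A^T A)^{-1} A^T. The include path is an assumption.
#include "lin_alg.h" // assumed header name
#include <vector>

int main() {
	MLPPLinAlg alg;
	std::vector<std::vector<double>> A = { { 4, 7 }, { 2, 6 } };
	alg.printMatrix(alg.inverse(A)); // expected: [[0.6, -0.7], [-0.2, 0.4]]

	// Tall (overdetermined) matrix: pinverse() gives the least-squares inverse.
	std::vector<std::vector<double>> B = { { 1, 0 }, { 0, 1 }, { 1, 1 } };
	alg.printMatrix(alg.pinverse(B)); // 2x3 pseudoinverse of B
	return 0;
}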
std::vector<std::vector<double>> LinAlg::zeromat(int n, int m) {
std::vector<std::vector<double>> MLPPLinAlg::zeromat(int n, int m) {
std::vector<std::vector<double>> zeromat;
zeromat.resize(n);
for (int i = 0; i < zeromat.size(); i++) {
@ -397,11 +397,11 @@ std::vector<std::vector<double>> LinAlg::zeromat(int n, int m) {
return zeromat;
}
std::vector<std::vector<double>> LinAlg::onemat(int n, int m) {
std::vector<std::vector<double>> MLPPLinAlg::onemat(int n, int m) {
return full(n, m, 1);
}
std::vector<std::vector<double>> LinAlg::full(int n, int m, int k) {
std::vector<std::vector<double>> MLPPLinAlg::full(int n, int m, int k) {
std::vector<std::vector<double>> full;
full.resize(n);
for (int i = 0; i < full.size(); i++) {
@ -415,7 +415,7 @@ std::vector<std::vector<double>> LinAlg::full(int n, int m, int k) {
return full;
}
std::vector<std::vector<double>> LinAlg::sin(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::sin(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> B;
B.resize(A.size());
for (int i = 0; i < B.size(); i++) {
@ -429,7 +429,7 @@ std::vector<std::vector<double>> LinAlg::sin(std::vector<std::vector<double>> A)
return B;
}
std::vector<std::vector<double>> LinAlg::cos(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::cos(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> B;
B.resize(A.size());
for (int i = 0; i < B.size(); i++) {
@ -443,7 +443,7 @@ std::vector<std::vector<double>> LinAlg::cos(std::vector<std::vector<double>> A)
return B;
}
std::vector<double> LinAlg::max(std::vector<double> a, std::vector<double> b) {
std::vector<double> MLPPLinAlg::max(std::vector<double> a, std::vector<double> b) {
std::vector<double> c;
c.resize(a.size());
for (int i = 0; i < c.size(); i++) {
@ -456,15 +456,15 @@ std::vector<double> LinAlg::max(std::vector<double> a, std::vector<double> b) {
return c;
}
double LinAlg::max(std::vector<std::vector<double>> A) {
double MLPPLinAlg::max(std::vector<std::vector<double>> A) {
return max(flatten(A));
}
double LinAlg::min(std::vector<std::vector<double>> A) {
double MLPPLinAlg::min(std::vector<std::vector<double>> A) {
return min(flatten(A));
}
std::vector<std::vector<double>> LinAlg::round(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::round(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> B;
B.resize(A.size());
for (int i = 0; i < B.size(); i++) {
@ -478,7 +478,7 @@ std::vector<std::vector<double>> LinAlg::round(std::vector<std::vector<double>>
return B;
}
double LinAlg::norm_2(std::vector<std::vector<double>> A) {
double MLPPLinAlg::norm_2(std::vector<std::vector<double>> A) {
double sum = 0;
for (int i = 0; i < A.size(); i++) {
for (int j = 0; j < A[i].size(); j++) {
@ -488,7 +488,7 @@ double LinAlg::norm_2(std::vector<std::vector<double>> A) {
return std::sqrt(sum);
}
std::vector<std::vector<double>> LinAlg::identity(double d) {
std::vector<std::vector<double>> MLPPLinAlg::identity(double d) {
std::vector<std::vector<double>> identityMat;
identityMat.resize(d);
for (int i = 0; i < identityMat.size(); i++) {
@ -506,7 +506,7 @@ std::vector<std::vector<double>> LinAlg::identity(double d) {
return identityMat;
}
std::vector<std::vector<double>> LinAlg::cov(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::cov(std::vector<std::vector<double>> A) {
Stat stat;
std::vector<std::vector<double>> covMat;
covMat.resize(A.size());
@ -521,7 +521,7 @@ std::vector<std::vector<double>> LinAlg::cov(std::vector<std::vector<double>> A)
return covMat;
}
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> LinAlg::eig(std::vector<std::vector<double>> A) {
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPLinAlg::eig(std::vector<std::vector<double>> A) {
/*
A (the entered parameter) in most use cases will be X'X, XX', etc. and must be symmetric.
That simply means that 1) X' = X and 2) X is a square matrix. This function that computes the
@ -641,7 +641,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> L
return { eigenvectors, a_new };
}
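A minimal sketch (not part of this commit) of eig() on a symmetric matrix such as a covariance matrix; per the comment above it returns { eigenvectors, eigenvalue matrix }, with the eigenvalues on the diagonal of the second element. The include path is an assumption.
#include "lin_alg.h" // assumed header name
#include <vector>

int main() {
	MLPPLinAlg alg;
	std::vector<std::vector<double>> A = { { 2, 1 }, { 1, 2 } }; // symmetric
	auto [eigenvectors, eigenvalues] = alg.eig(A);
	alg.printMatrix(eigenvalues); // diagonal should hold 3 and 1, up to ordering/rounding
	return 0;
}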
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>, std::vector<std::vector<double>>> LinAlg::SVD(std::vector<std::vector<double>> A) {
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPLinAlg::SVD(std::vector<std::vector<double>> A) {
auto [left_eigenvecs, eigenvals] = eig(matmult(A, transpose(A)));
auto [right_eigenvecs, right_eigenvals] = eig(matmult(transpose(A), A));
@ -655,12 +655,12 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>, s
return { left_eigenvecs, sigma, right_eigenvecs };
}
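A minimal sketch (not part of this commit) of SVD(), which is built on eig() of A A^T and A^T A and returns { U, S, Vt } as used by PCA later in this diff. The include path is an assumption.
#include "lin_alg.h" // assumed header name
#include <vector>

int main() {
	MLPPLinAlg alg;
	std::vector<std::vector<double>> A = { { 3, 0 }, { 0, 2 } };
	auto [U, S, Vt] = alg.SVD(A);
	alg.printMatrix(S); // singular values (3 and 2) on the diagonal, up to ordering
	return 0;
}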
std::vector<double> LinAlg::vectorProjection(std::vector<double> a, std::vector<double> b) {
std::vector<double> MLPPLinAlg::vectorProjection(std::vector<double> a, std::vector<double> b) {
double product = dot(a, b) / dot(a, a);
return scalarMultiply(product, a); // Projection of vector b onto a. Denoted as proj_a(b).
}
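A minimal sketch (not part of this commit) of vectorProjection(), i.e. proj_a(b) = (a.b / a.a) * a, the component of b along a. The include path is an assumption.
#include "lin_alg.h" // assumed header name
#include <vector>

int main() {
	MLPPLinAlg alg;
	std::vector<double> a = { 1, 0 };
	std::vector<double> b = { 3, 4 };
	alg.printVector(alg.vectorProjection(a, b)); // expected: 3 0
	return 0;
}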
std::vector<std::vector<double>> LinAlg::gramSchmidtProcess(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPLinAlg::gramSchmidtProcess(std::vector<std::vector<double>> A) {
A = transpose(A); // C++ vectors lack a mechanism to directly index columns. So, we transpose *a copy* of A for this purpose for ease of use.
std::vector<std::vector<double>> B;
B.resize(A.size());
@ -680,13 +680,13 @@ std::vector<std::vector<double>> LinAlg::gramSchmidtProcess(std::vector<std::vec
return transpose(B); // We re-transpose the matrix.
}
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> LinAlg::QRD(std::vector<std::vector<double>> A) {
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPLinAlg::QRD(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> Q = gramSchmidtProcess(A);
std::vector<std::vector<double>> R = matmult(transpose(Q), A);
return { Q, R };
}
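A minimal sketch (not part of this commit) of QRD(): Q comes from the Gram-Schmidt process above and R = Q^T A, so matmult(Q, R) should reproduce A up to rounding. The include path is an assumption.
#include "lin_alg.h" // assumed header name
#include <vector>

int main() {
	MLPPLinAlg alg;
	std::vector<std::vector<double>> A = { { 1, 2 }, { 0, 1 } };
	auto [Q, R] = alg.QRD(A);
	alg.printMatrix(R); // upper triangular
	alg.printMatrix(alg.matmult(Q, R)); // should match A
	return 0;
}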
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> LinAlg::chol(std::vector<std::vector<double>> A) {
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPLinAlg::chol(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> L = zeromat(A.size(), A[0].size());
for (int j = 0; j < L.size(); j++) { // Matrices entered must be square. No problem here.
for (int i = j; i < L.size(); i++) {
@ -708,7 +708,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> L
return { L, transpose(L) }; // Indeed, L.T is our upper triangular matrix.
}
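A minimal sketch (not part of this commit) of chol(), which factors a symmetric positive-definite matrix as A = L L^T and returns { L, L^T }. The include path is an assumption.
#include "lin_alg.h" // assumed header name
#include <vector>

int main() {
	MLPPLinAlg alg;
	std::vector<std::vector<double>> A = {
		{ 4, 12, -16 },
		{ 12, 37, -43 },
		{ -16, -43, 98 }
	};
	auto [L, LT] = alg.chol(A);
	alg.printMatrix(L); // expected lower-triangular factor: [[2,0,0],[6,1,0],[-8,5,3]]
	return 0;
}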
double LinAlg::sum_elements(std::vector<std::vector<double>> A) {
double MLPPLinAlg::sum_elements(std::vector<std::vector<double>> A) {
double sum = 0;
for (int i = 0; i < A.size(); i++) {
for (int j = 0; j < A[i].size(); j++) {
@ -718,7 +718,7 @@ double LinAlg::sum_elements(std::vector<std::vector<double>> A) {
return sum;
}
std::vector<double> LinAlg::flatten(std::vector<std::vector<double>> A) {
std::vector<double> MLPPLinAlg::flatten(std::vector<std::vector<double>> A) {
std::vector<double> a;
for (int i = 0; i < A.size(); i++) {
for (int j = 0; j < A[i].size(); j++) {
@ -728,11 +728,11 @@ std::vector<double> LinAlg::flatten(std::vector<std::vector<double>> A) {
return a;
}
std::vector<double> LinAlg::solve(std::vector<std::vector<double>> A, std::vector<double> b) {
std::vector<double> MLPPLinAlg::solve(std::vector<std::vector<double>> A, std::vector<double> b) {
return mat_vec_mult(inverse(A), b);
}
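A minimal sketch (not part of this commit) of solve(), which computes x = A^{-1} b via the explicit inverse defined above. The include path is an assumption.
#include "lin_alg.h" // assumed header name
#include <vector>

int main() {
	MLPPLinAlg alg;
	std::vector<std::vector<double>> A = { { 2, 1 }, { 1, 3 } };
	std::vector<double> b = { 3, 5 };
	alg.printVector(alg.solve(A, b)); // expected: 0.8 1.4
	return 0;
}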
bool LinAlg::positiveDefiniteChecker(std::vector<std::vector<double>> A) {
bool MLPPLinAlg::positiveDefiniteChecker(std::vector<std::vector<double>> A) {
auto [eigenvectors, eigenvals] = eig(A);
std::vector<double> eigenvals_vec;
for (int i = 0; i < eigenvals.size(); i++) {
@ -746,7 +746,7 @@ bool LinAlg::positiveDefiniteChecker(std::vector<std::vector<double>> A) {
return true;
}
bool LinAlg::negativeDefiniteChecker(std::vector<std::vector<double>> A) {
bool MLPPLinAlg::negativeDefiniteChecker(std::vector<std::vector<double>> A) {
auto [eigenvectors, eigenvals] = eig(A);
std::vector<double> eigenvals_vec;
for (int i = 0; i < eigenvals.size(); i++) {
@ -760,7 +760,7 @@ bool LinAlg::negativeDefiniteChecker(std::vector<std::vector<double>> A) {
return true;
}
bool LinAlg::zeroEigenvalue(std::vector<std::vector<double>> A) {
bool MLPPLinAlg::zeroEigenvalue(std::vector<std::vector<double>> A) {
auto [eigenvectors, eigenvals] = eig(A);
std::vector<double> eigenvals_vec;
for (int i = 0; i < eigenvals.size(); i++) {
@ -774,7 +774,7 @@ bool LinAlg::zeroEigenvalue(std::vector<std::vector<double>> A) {
return false;
}
void LinAlg::printMatrix(std::vector<std::vector<double>> A) {
void MLPPLinAlg::printMatrix(std::vector<std::vector<double>> A) {
for (int i = 0; i < A.size(); i++) {
for (int j = 0; j < A[i].size(); j++) {
std::cout << A[i][j] << " ";
@ -783,7 +783,7 @@ void LinAlg::printMatrix(std::vector<std::vector<double>> A) {
}
}
std::vector<std::vector<double>> LinAlg::outerProduct(std::vector<double> a, std::vector<double> b) {
std::vector<std::vector<double>> MLPPLinAlg::outerProduct(std::vector<double> a, std::vector<double> b) {
std::vector<std::vector<double>> C;
C.resize(a.size());
for (int i = 0; i < C.size(); i++) {
@ -792,7 +792,7 @@ std::vector<std::vector<double>> LinAlg::outerProduct(std::vector<double> a, std
return C;
}
std::vector<double> LinAlg::hadamard_product(std::vector<double> a, std::vector<double> b) {
std::vector<double> MLPPLinAlg::hadamard_product(std::vector<double> a, std::vector<double> b) {
std::vector<double> c;
c.resize(a.size());
@ -803,7 +803,7 @@ std::vector<double> LinAlg::hadamard_product(std::vector<double> a, std::vector<
return c;
}
std::vector<double> LinAlg::elementWiseDivision(std::vector<double> a, std::vector<double> b) {
std::vector<double> MLPPLinAlg::elementWiseDivision(std::vector<double> a, std::vector<double> b) {
std::vector<double> c;
c.resize(a.size());
@ -813,21 +813,21 @@ std::vector<double> LinAlg::elementWiseDivision(std::vector<double> a, std::vect
return c;
}
std::vector<double> LinAlg::scalarMultiply(double scalar, std::vector<double> a) {
std::vector<double> MLPPLinAlg::scalarMultiply(double scalar, std::vector<double> a) {
for (int i = 0; i < a.size(); i++) {
a[i] *= scalar;
}
return a;
}
std::vector<double> LinAlg::scalarAdd(double scalar, std::vector<double> a) {
std::vector<double> MLPPLinAlg::scalarAdd(double scalar, std::vector<double> a) {
for (int i = 0; i < a.size(); i++) {
a[i] += scalar;
}
return a;
}
std::vector<double> LinAlg::addition(std::vector<double> a, std::vector<double> b) {
std::vector<double> MLPPLinAlg::addition(std::vector<double> a, std::vector<double> b) {
std::vector<double> c;
c.resize(a.size());
for (int i = 0; i < a.size(); i++) {
@ -836,7 +836,7 @@ std::vector<double> LinAlg::addition(std::vector<double> a, std::vector<double>
return c;
}
std::vector<double> LinAlg::subtraction(std::vector<double> a, std::vector<double> b) {
std::vector<double> MLPPLinAlg::subtraction(std::vector<double> a, std::vector<double> b) {
std::vector<double> c;
c.resize(a.size());
for (int i = 0; i < a.size(); i++) {
@ -845,14 +845,14 @@ std::vector<double> LinAlg::subtraction(std::vector<double> a, std::vector<doubl
return c;
}
std::vector<double> LinAlg::subtractMatrixRows(std::vector<double> a, std::vector<std::vector<double>> B) {
std::vector<double> MLPPLinAlg::subtractMatrixRows(std::vector<double> a, std::vector<std::vector<double>> B) {
for (int i = 0; i < B.size(); i++) {
a = subtraction(a, B[i]);
}
return a;
}
std::vector<double> LinAlg::log(std::vector<double> a) {
std::vector<double> MLPPLinAlg::log(std::vector<double> a) {
std::vector<double> b;
b.resize(a.size());
for (int i = 0; i < a.size(); i++) {
@ -861,7 +861,7 @@ std::vector<double> LinAlg::log(std::vector<double> a) {
return b;
}
std::vector<double> LinAlg::log10(std::vector<double> a) {
std::vector<double> MLPPLinAlg::log10(std::vector<double> a) {
std::vector<double> b;
b.resize(a.size());
for (int i = 0; i < a.size(); i++) {
@ -870,7 +870,7 @@ std::vector<double> LinAlg::log10(std::vector<double> a) {
return b;
}
std::vector<double> LinAlg::exp(std::vector<double> a) {
std::vector<double> MLPPLinAlg::exp(std::vector<double> a) {
std::vector<double> b;
b.resize(a.size());
for (int i = 0; i < a.size(); i++) {
@ -879,7 +879,7 @@ std::vector<double> LinAlg::exp(std::vector<double> a) {
return b;
}
std::vector<double> LinAlg::erf(std::vector<double> a) {
std::vector<double> MLPPLinAlg::erf(std::vector<double> a) {
std::vector<double> b;
b.resize(a.size());
for (int i = 0; i < a.size(); i++) {
@ -888,7 +888,7 @@ std::vector<double> LinAlg::erf(std::vector<double> a) {
return b;
}
std::vector<double> LinAlg::exponentiate(std::vector<double> a, double p) {
std::vector<double> MLPPLinAlg::exponentiate(std::vector<double> a, double p) {
std::vector<double> b;
b.resize(a.size());
for (int i = 0; i < b.size(); i++) {
@ -897,15 +897,15 @@ std::vector<double> LinAlg::exponentiate(std::vector<double> a, double p) {
return b;
}
std::vector<double> LinAlg::sqrt(std::vector<double> a) {
std::vector<double> MLPPLinAlg::sqrt(std::vector<double> a) {
return exponentiate(a, 0.5);
}
std::vector<double> LinAlg::cbrt(std::vector<double> a) {
std::vector<double> MLPPLinAlg::cbrt(std::vector<double> a) {
return exponentiate(a, double(1) / double(3));
}
double LinAlg::dot(std::vector<double> a, std::vector<double> b) {
double MLPPLinAlg::dot(std::vector<double> a, std::vector<double> b) {
double c = 0;
for (int i = 0; i < a.size(); i++) {
c += a[i] * b[i];
@ -913,7 +913,7 @@ double LinAlg::dot(std::vector<double> a, std::vector<double> b) {
return c;
}
std::vector<double> LinAlg::cross(std::vector<double> a, std::vector<double> b) {
std::vector<double> MLPPLinAlg::cross(std::vector<double> a, std::vector<double> b) {
// Cross products also exist in R^7, but this is limited to R^3, as Wolfram does.
std::vector<std::vector<double>> mat = { onevec(3), a, b };
@ -924,7 +924,7 @@ std::vector<double> LinAlg::cross(std::vector<double> a, std::vector<double> b)
return { det1, det2, det3 };
}
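A minimal sketch (not part of this commit) of cross() on the standard basis vectors of R^3. The include path is an assumption.
#include "lin_alg.h" // assumed header name
#include <vector>

int main() {
	MLPPLinAlg alg;
	std::vector<double> a = { 1, 0, 0 };
	std::vector<double> b = { 0, 1, 0 };
	alg.printVector(alg.cross(a, b)); // expected: 0 0 1
	return 0;
}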
std::vector<double> LinAlg::abs(std::vector<double> a) {
std::vector<double> MLPPLinAlg::abs(std::vector<double> a) {
std::vector<double> b;
b.resize(a.size());
for (int i = 0; i < b.size(); i++) {
@ -933,17 +933,17 @@ std::vector<double> LinAlg::abs(std::vector<double> a) {
return b;
}
std::vector<double> LinAlg::zerovec(int n) {
std::vector<double> MLPPLinAlg::zerovec(int n) {
std::vector<double> zerovec;
zerovec.resize(n);
return zerovec;
}
std::vector<double> LinAlg::onevec(int n) {
std::vector<double> MLPPLinAlg::onevec(int n) {
return full(n, 1);
}
std::vector<std::vector<double>> LinAlg::diag(std::vector<double> a) {
std::vector<std::vector<double>> MLPPLinAlg::diag(std::vector<double> a) {
std::vector<std::vector<double>> B = zeromat(a.size(), a.size());
for (int i = 0; i < B.size(); i++) {
B[i][i] = a[i];
@ -951,7 +951,7 @@ std::vector<std::vector<double>> LinAlg::diag(std::vector<double> a) {
return B;
}
std::vector<double> LinAlg::full(int n, int k) {
std::vector<double> MLPPLinAlg::full(int n, int k) {
std::vector<double> full;
full.resize(n);
for (int i = 0; i < full.size(); i++) {
@ -960,7 +960,7 @@ std::vector<double> LinAlg::full(int n, int k) {
return full;
}
std::vector<double> LinAlg::sin(std::vector<double> a) {
std::vector<double> MLPPLinAlg::sin(std::vector<double> a) {
std::vector<double> b;
b.resize(a.size());
for (int i = 0; i < a.size(); i++) {
@ -969,7 +969,7 @@ std::vector<double> LinAlg::sin(std::vector<double> a) {
return b;
}
std::vector<double> LinAlg::cos(std::vector<double> a) {
std::vector<double> MLPPLinAlg::cos(std::vector<double> a) {
std::vector<double> b;
b.resize(a.size());
for (int i = 0; i < a.size(); i++) {
@ -978,7 +978,7 @@ std::vector<double> LinAlg::cos(std::vector<double> a) {
return b;
}
std::vector<std::vector<double>> LinAlg::rotate(std::vector<std::vector<double>> A, double theta, int axis) {
std::vector<std::vector<double>> MLPPLinAlg::rotate(std::vector<std::vector<double>> A, double theta, int axis) {
std::vector<std::vector<double>> rotationMatrix = { { std::cos(theta), -std::sin(theta) }, { std::sin(theta), std::cos(theta) } };
if (axis == 0) {
rotationMatrix = { { 1, 0, 0 }, { 0, std::cos(theta), -std::sin(theta) }, { 0, std::sin(theta), std::cos(theta) } };
@ -991,7 +991,7 @@ std::vector<std::vector<double>> LinAlg::rotate(std::vector<std::vector<double>>
return matmult(A, rotationMatrix);
}
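A minimal sketch (not part of this commit) of rotate() with axis == 0, which right-multiplies the row-vector points in A by the 3x3 x-axis rotation matrix shown above. The include path is an assumption.
#include "lin_alg.h" // assumed header name
#include <vector>

int main() {
	MLPPLinAlg alg;
	const double theta = 1.5707963267948966; // pi / 2
	std::vector<std::vector<double>> points = { { 0, 1, 0 } }; // one point per row
	alg.printMatrix(alg.rotate(points, theta, 0)); // rotate about the x-axis
	return 0;
}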
std::vector<std::vector<double>> LinAlg::max(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> MLPPLinAlg::max(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<double>> C;
C.resize(A.size());
for (int i = 0; i < C.size(); i++) {
@ -1003,7 +1003,7 @@ std::vector<std::vector<double>> LinAlg::max(std::vector<std::vector<double>> A,
return C;
}
double LinAlg::max(std::vector<double> a) {
double MLPPLinAlg::max(std::vector<double> a) {
double max = a[0];
for (int i = 0; i < a.size(); i++) {
if (a[i] > max) {
@ -1013,7 +1013,7 @@ double LinAlg::max(std::vector<double> a) {
return max;
}
double LinAlg::min(std::vector<double> a) {
double MLPPLinAlg::min(std::vector<double> a) {
double min = a[0];
for (int i = 0; i < a.size(); i++) {
if (a[i] < min) {
@ -1023,7 +1023,7 @@ double LinAlg::min(std::vector<double> a) {
return min;
}
std::vector<double> LinAlg::round(std::vector<double> a) {
std::vector<double> MLPPLinAlg::round(std::vector<double> a) {
std::vector<double> b;
b.resize(a.size());
for (int i = 0; i < a.size(); i++) {
@ -1033,7 +1033,7 @@ std::vector<double> LinAlg::round(std::vector<double> a) {
}
// Multidimensional Euclidean Distance
double LinAlg::euclideanDistance(std::vector<double> a, std::vector<double> b) {
double MLPPLinAlg::euclideanDistance(std::vector<double> a, std::vector<double> b) {
double dist = 0;
for (int i = 0; i < a.size(); i++) {
dist += (a[i] - b[i]) * (a[i] - b[i]);
@ -1041,11 +1041,11 @@ double LinAlg::euclideanDistance(std::vector<double> a, std::vector<double> b) {
return std::sqrt(dist);
}
double LinAlg::norm_2(std::vector<double> a) {
double MLPPLinAlg::norm_2(std::vector<double> a) {
return std::sqrt(norm_sq(a));
}
double LinAlg::norm_sq(std::vector<double> a) {
double MLPPLinAlg::norm_sq(std::vector<double> a) {
double n_sq = 0;
for (int i = 0; i < a.size(); i++) {
n_sq += a[i] * a[i];
@ -1053,7 +1053,7 @@ double LinAlg::norm_sq(std::vector<double> a) {
return n_sq;
}
double LinAlg::sum_elements(std::vector<double> a) {
double MLPPLinAlg::sum_elements(std::vector<double> a) {
double sum = 0;
for (int i = 0; i < a.size(); i++) {
sum += a[i];
@ -1061,18 +1061,18 @@ double LinAlg::sum_elements(std::vector<double> a) {
return sum;
}
double LinAlg::cosineSimilarity(std::vector<double> a, std::vector<double> b) {
double MLPPLinAlg::cosineSimilarity(std::vector<double> a, std::vector<double> b) {
return dot(a, b) / (norm_2(a) * norm_2(b));
}
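A minimal sketch (not part of this commit) of cosineSimilarity(), i.e. a.b / (||a|| ||b||). The include path is an assumption.
#include "lin_alg.h" // assumed header name
#include <iostream>
#include <vector>

int main() {
	MLPPLinAlg alg;
	std::vector<double> a = { 1, 0 };
	std::vector<double> b = { 1, 1 };
	std::cout << alg.cosineSimilarity(a, b) << std::endl; // expected: ~0.7071
	return 0;
}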
void LinAlg::printVector(std::vector<double> a) {
void MLPPLinAlg::printVector(std::vector<double> a) {
for (int i = 0; i < a.size(); i++) {
std::cout << a[i] << " ";
}
std::cout << std::endl;
}
std::vector<std::vector<double>> LinAlg::mat_vec_add(std::vector<std::vector<double>> A, std::vector<double> b) {
std::vector<std::vector<double>> MLPPLinAlg::mat_vec_add(std::vector<std::vector<double>> A, std::vector<double> b) {
for (int i = 0; i < A.size(); i++) {
for (int j = 0; j < A[i].size(); j++) {
A[i][j] += b[j];
@ -1081,7 +1081,7 @@ std::vector<std::vector<double>> LinAlg::mat_vec_add(std::vector<std::vector<dou
return A;
}
std::vector<double> LinAlg::mat_vec_mult(std::vector<std::vector<double>> A, std::vector<double> b) {
std::vector<double> MLPPLinAlg::mat_vec_mult(std::vector<std::vector<double>> A, std::vector<double> b) {
std::vector<double> c;
c.resize(A.size());
@ -1093,35 +1093,35 @@ std::vector<double> LinAlg::mat_vec_mult(std::vector<std::vector<double>> A, std
return c;
}
std::vector<std::vector<std::vector<double>>> LinAlg::addition(std::vector<std::vector<std::vector<double>>> A, std::vector<std::vector<std::vector<double>>> B) {
std::vector<std::vector<std::vector<double>>> MLPPLinAlg::addition(std::vector<std::vector<std::vector<double>>> A, std::vector<std::vector<std::vector<double>>> B) {
for (int i = 0; i < A.size(); i++) {
A[i] = addition(A[i], B[i]);
}
return A;
}
std::vector<std::vector<std::vector<double>>> LinAlg::elementWiseDivision(std::vector<std::vector<std::vector<double>>> A, std::vector<std::vector<std::vector<double>>> B) {
std::vector<std::vector<std::vector<double>>> MLPPLinAlg::elementWiseDivision(std::vector<std::vector<std::vector<double>>> A, std::vector<std::vector<std::vector<double>>> B) {
for (int i = 0; i < A.size(); i++) {
A[i] = elementWiseDivision(A[i], B[i]);
}
return A;
}
std::vector<std::vector<std::vector<double>>> LinAlg::sqrt(std::vector<std::vector<std::vector<double>>> A) {
std::vector<std::vector<std::vector<double>>> MLPPLinAlg::sqrt(std::vector<std::vector<std::vector<double>>> A) {
for (int i = 0; i < A.size(); i++) {
A[i] = sqrt(A[i]);
}
return A;
}
std::vector<std::vector<std::vector<double>>> LinAlg::exponentiate(std::vector<std::vector<std::vector<double>>> A, double p) {
std::vector<std::vector<std::vector<double>>> MLPPLinAlg::exponentiate(std::vector<std::vector<std::vector<double>>> A, double p) {
for (int i = 0; i < A.size(); i++) {
A[i] = exponentiate(A[i], p);
}
return A;
}
std::vector<std::vector<double>> LinAlg::tensor_vec_mult(std::vector<std::vector<std::vector<double>>> A, std::vector<double> b) {
std::vector<std::vector<double>> MLPPLinAlg::tensor_vec_mult(std::vector<std::vector<std::vector<double>>> A, std::vector<double> b) {
std::vector<std::vector<double>> C;
C.resize(A.size());
for (int i = 0; i < C.size(); i++) {
@ -1135,7 +1135,7 @@ std::vector<std::vector<double>> LinAlg::tensor_vec_mult(std::vector<std::vector
return C;
}
std::vector<double> LinAlg::flatten(std::vector<std::vector<std::vector<double>>> A) {
std::vector<double> MLPPLinAlg::flatten(std::vector<std::vector<std::vector<double>>> A) {
std::vector<double> c;
for (int i = 0; i < A.size(); i++) {
std::vector<double> flattenedVec = flatten(A[i]);
@ -1144,7 +1144,7 @@ std::vector<double> LinAlg::flatten(std::vector<std::vector<std::vector<double>>
return c;
}
void LinAlg::printTensor(std::vector<std::vector<std::vector<double>>> A) {
void MLPPLinAlg::printTensor(std::vector<std::vector<std::vector<double>>> A) {
for (int i = 0; i < A.size(); i++) {
printMatrix(A[i]);
if (i != A.size() - 1) {
@ -1153,21 +1153,21 @@ void LinAlg::printTensor(std::vector<std::vector<std::vector<double>>> A) {
}
}
std::vector<std::vector<std::vector<double>>> LinAlg::scalarMultiply(double scalar, std::vector<std::vector<std::vector<double>>> A) {
std::vector<std::vector<std::vector<double>>> MLPPLinAlg::scalarMultiply(double scalar, std::vector<std::vector<std::vector<double>>> A) {
for (int i = 0; i < A.size(); i++) {
A[i] = scalarMultiply(scalar, A[i]);
}
return A;
}
std::vector<std::vector<std::vector<double>>> LinAlg::scalarAdd(double scalar, std::vector<std::vector<std::vector<double>>> A) {
std::vector<std::vector<std::vector<double>>> MLPPLinAlg::scalarAdd(double scalar, std::vector<std::vector<std::vector<double>>> A) {
for (int i = 0; i < A.size(); i++) {
A[i] = scalarAdd(scalar, A[i]);
}
return A;
}
std::vector<std::vector<std::vector<double>>> LinAlg::resize(std::vector<std::vector<std::vector<double>>> A, std::vector<std::vector<std::vector<double>>> B) {
std::vector<std::vector<std::vector<double>>> MLPPLinAlg::resize(std::vector<std::vector<std::vector<double>>> A, std::vector<std::vector<std::vector<double>>> B) {
A.resize(B.size());
for (int i = 0; i < B.size(); i++) {
A[i].resize(B[i].size());
@ -1178,21 +1178,21 @@ std::vector<std::vector<std::vector<double>>> LinAlg::resize(std::vector<std::ve
return A;
}
std::vector<std::vector<std::vector<double>>> LinAlg::max(std::vector<std::vector<std::vector<double>>> A, std::vector<std::vector<std::vector<double>>> B) {
std::vector<std::vector<std::vector<double>>> MLPPLinAlg::max(std::vector<std::vector<std::vector<double>>> A, std::vector<std::vector<std::vector<double>>> B) {
for (int i = 0; i < A.size(); i++) {
A[i] = max(A[i], B[i]);
}
return A;
}
std::vector<std::vector<std::vector<double>>> LinAlg::abs(std::vector<std::vector<std::vector<double>>> A) {
std::vector<std::vector<std::vector<double>>> MLPPLinAlg::abs(std::vector<std::vector<std::vector<double>>> A) {
for (int i = 0; i < A.size(); i++) {
A[i] = abs(A[i]);
}
return A;
}
double LinAlg::norm_2(std::vector<std::vector<std::vector<double>>> A) {
double MLPPLinAlg::norm_2(std::vector<std::vector<std::vector<double>>> A) {
double sum = 0;
for (int i = 0; i < A.size(); i++) {
for (int j = 0; j < A[i].size(); j++) {
@ -1205,7 +1205,7 @@ double LinAlg::norm_2(std::vector<std::vector<std::vector<double>>> A) {
}
// Bad implementation. Change this later.
std::vector<std::vector<std::vector<double>>> LinAlg::vector_wise_tensor_product(std::vector<std::vector<std::vector<double>>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<std::vector<double>>> MLPPLinAlg::vector_wise_tensor_product(std::vector<std::vector<std::vector<double>>> A, std::vector<std::vector<double>> B) {
std::vector<std::vector<std::vector<double>>> C;
C = resize(C, A);
for (int i = 0; i < A[0].size(); i++) {


@ -12,7 +12,7 @@
#include <vector>
class LinAlg {
class MLPPLinAlg {
public:
// MATRIX FUNCTIONS


@ -34,7 +34,7 @@ double LinReg::modelTest(std::vector<double> x) {
}
void LinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -66,7 +66,7 @@ void LinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) {
}
void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -97,7 +97,7 @@ void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
void LinReg::SGD(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -136,7 +136,7 @@ void LinReg::SGD(double learning_rate, int max_epoch, bool UI) {
}
void LinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -174,7 +174,7 @@ void LinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool
}
void LinReg::normalEquation() {
LinAlg alg;
MLPPLinAlg alg;
Stat stat;
std::vector<double> x_means;
std::vector<std::vector<double>> inputSetT = alg.transpose(inputSet);
@ -224,12 +224,12 @@ double LinReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
}
std::vector<double> LinReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}
double LinReg::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
return alg.dot(weights, x) + bias;
}


@ -31,7 +31,7 @@ double LogReg::modelTest(std::vector<double> x) {
}
void LogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -63,7 +63,7 @@ void LogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
void LogReg::MLE(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -94,7 +94,7 @@ void LogReg::MLE(double learning_rate, int max_epoch, bool UI) {
}
void LogReg::SGD(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -133,7 +133,7 @@ void LogReg::SGD(double learning_rate, int max_epoch, bool UI) {
}
void LogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -187,13 +187,13 @@ double LogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
}
std::vector<double> LogReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
double LogReg::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.sigmoid(alg.dot(weights, x) + bias);
}


@ -55,7 +55,7 @@ std::vector<double> MANN::modelTest(std::vector<double> x) {
void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;


@ -37,7 +37,7 @@ double MLP::modelTest(std::vector<double> x) {
void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -96,7 +96,7 @@ void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void MLP::SGD(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -150,7 +150,7 @@ void MLP::SGD(double learning_rate, int max_epoch, bool UI) {
void MLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -232,7 +232,7 @@ double MLP::Cost(std::vector<double> y_hat, std::vector<double> y) {
}
std::vector<double> MLP::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
@ -240,7 +240,7 @@ std::vector<double> MLP::Evaluate(std::vector<std::vector<double>> X) {
}
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLP::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
@ -248,7 +248,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> M
}
double MLP::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
@ -256,7 +256,7 @@ double MLP::Evaluate(std::vector<double> x) {
}
std::tuple<std::vector<double>, std::vector<double>> MLP::propagate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
@ -264,7 +264,7 @@ std::tuple<std::vector<double>, std::vector<double>> MLP::propagate(std::vector<
}
void MLP::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
a2 = avn.sigmoid(z2);


@ -117,14 +117,14 @@ MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activ
}
void MultiOutputLayer::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z = alg.mat_vec_add(alg.matmult(input, weights), bias);
a = (avn.*activation_map[activation])(z, 0);
}
void MultiOutputLayer::Test(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
a_test = (avn.*activationTest_map[activation])(z_test, 0);


@ -78,7 +78,7 @@ void MultinomialNB::computeTheta() {
}
void MultinomialNB::Evaluate() {
LinAlg alg;
MLPPLinAlg alg;
for (int i = 0; i < outputSet.size(); i++) {
// Pr(B | A) * Pr(A)
double score[class_num];


@ -226,12 +226,12 @@ double NumericalAnalysis::constantApproximation(double (*function)(std::vector<d
}
double NumericalAnalysis::linearApproximation(double (*function)(std::vector<double>), std::vector<double> c, std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
return constantApproximation(function, c) + alg.matmult(alg.transpose({ jacobian(function, c) }), { alg.subtraction(x, c) })[0][0];
}
double NumericalAnalysis::quadraticApproximation(double (*function)(std::vector<double>), std::vector<double> c, std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
return linearApproximation(function, c, x) + 0.5 * alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(hessian(function, c), alg.transpose({ alg.subtraction(x, c) })))[0][0];
}
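A minimal sketch (not part of this commit) of the approximation routines above: for a function that is already quadratic, the second-order approximation about any center c reproduces f(x) up to numerical-differentiation error. The header name "numerical_analysis.h" and default construction of NumericalAnalysis are assumptions, mirroring how the other helper classes are used.
#include "numerical_analysis.h" // assumed header name
#include <iostream>
#include <vector>

double f(std::vector<double> v) {
	return v[0] * v[0] + 3 * v[1];
}

int main() {
	NumericalAnalysis numan; // assumed default-constructible, like the other helpers
	std::cout << numan.quadraticApproximation(f, { 0, 0 }, { 1, 2 }) << std::endl; // ~7 = f(1, 2)
	return 0;
}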
@ -245,7 +245,7 @@ double NumericalAnalysis::cubicApproximation(double (*function)(std::vector<doub
Perform remaining multiplies as done for the 2nd order approximation.
Result is a scalar.
*/
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> resultMat = alg.tensor_vec_mult(thirdOrderTensor(function, c), alg.subtraction(x, c));
double resultScalar = alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(resultMat, alg.transpose({ alg.subtraction(x, c) })))[0][0];
@ -253,7 +253,7 @@ double NumericalAnalysis::cubicApproximation(double (*function)(std::vector<doub
}
double NumericalAnalysis::laplacian(double (*function)(std::vector<double>), std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> hessian_matrix = hessian(function, x);
double laplacian = 0;
for (int i = 0; i < hessian_matrix.size(); i++) {
@ -263,7 +263,7 @@ double NumericalAnalysis::laplacian(double (*function)(std::vector<double>), std
}
std::string NumericalAnalysis::secondPartialDerivativeTest(double (*function)(std::vector<double>), std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> hessianMatrix = hessian(function, x);
/*
The reason we do this is because the 2nd partial derivative test is less conclusive for functions of variables greater than


@ -114,14 +114,14 @@ OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost,
}
void OutputLayer::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights));
a = (avn.*activation_map[activation])(z, 0);
}
void OutputLayer::Test(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z_test = alg.dot(weights, x) + bias;
a_test = (avn.*activationTest_map[activation])(z_test, 0);


@ -18,7 +18,7 @@ PCA::PCA(std::vector<std::vector<double>> inputSet, int k) :
}
std::vector<std::vector<double>> PCA::principalComponents() {
LinAlg alg;
MLPPLinAlg alg;
MLPPData data;
auto [U, S, Vt] = alg.SVD(alg.cov(inputSet));
@ -34,7 +34,7 @@ std::vector<std::vector<double>> PCA::principalComponents() {
}
// Simply tells us the percentage of variance maintained.
double PCA::score() {
LinAlg alg;
MLPPLinAlg alg;
std::vector<std::vector<double>> X_approx = alg.matmult(U_reduce, Z);
double num = 0, den = 0;
for (int i = 0; i < X_normalized.size(); i++) {


@ -32,7 +32,7 @@ double ProbitReg::modelTest(std::vector<double> x) {
void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -65,7 +65,7 @@ void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -99,7 +99,7 @@ void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) {
void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) {
// NOTE: ∂y_hat/∂z is sparse
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -140,7 +140,7 @@ void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) {
void ProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -214,30 +214,30 @@ double ProbitReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
}
std::vector<double> ProbitReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
std::vector<double> ProbitReg::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}
double ProbitReg::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.gaussianCDF(alg.dot(weights, x) + bias);
}
double ProbitReg::propagate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
return alg.dot(weights, x) + bias;
}
// gaussianCDF ( wTx + b )
void ProbitReg::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z = propagate(inputSet);


@ -67,7 +67,7 @@ double Reg::regTerm(std::vector<std::vector<double>> weights, double lambda, dou
}
std::vector<double> Reg::regWeights(std::vector<double> weights, double lambda, double alpha, std::string reg) {
LinAlg alg;
MLPPLinAlg alg;
if (reg == "WeightClipping") {
return regDerivTerm(weights, lambda, alpha, reg);
}
@ -79,7 +79,7 @@ std::vector<double> Reg::regWeights(std::vector<double> weights, double lambda,
}
std::vector<std::vector<double>> Reg::regWeights(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg) {
LinAlg alg;
MLPPLinAlg alg;
if (reg == "WeightClipping") {
return regDerivTerm(weights, lambda, alpha, reg);
}


@ -36,7 +36,7 @@ std::vector<std::vector<double>> SoftmaxNet::modelSetTest(std::vector<std::vecto
void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -92,7 +92,7 @@ void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -146,7 +146,7 @@ void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) {
void SoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -236,7 +236,7 @@ void SoftmaxNet::save(std::string fileName) {
util.saveParameters(fileName, weights1, bias1, 0, 1);
util.saveParameters(fileName, weights2, bias2, 1, 2);
LinAlg alg;
MLPPLinAlg alg;
}
std::vector<std::vector<double>> SoftmaxNet::getEmbeddings() {
@ -251,7 +251,7 @@ double SoftmaxNet::Cost(std::vector<std::vector<double>> y_hat, std::vector<std:
}
std::vector<std::vector<double>> SoftmaxNet::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
@ -259,7 +259,7 @@ std::vector<std::vector<double>> SoftmaxNet::Evaluate(std::vector<std::vector<do
}
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> SoftmaxNet::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
@ -267,7 +267,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> S
}
std::vector<double> SoftmaxNet::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
@ -275,7 +275,7 @@ std::vector<double> SoftmaxNet::Evaluate(std::vector<double> x) {
}
std::tuple<std::vector<double>, std::vector<double>> SoftmaxNet::propagate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
@ -283,7 +283,7 @@ std::tuple<std::vector<double>, std::vector<double>> SoftmaxNet::propagate(std::
}
void SoftmaxNet::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
a2 = avn.sigmoid(z2);


@ -31,7 +31,7 @@ std::vector<std::vector<double>> SoftmaxReg::modelSetTest(std::vector<std::vecto
}
void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -70,7 +70,7 @@ void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -113,7 +113,7 @@ void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) {
}
void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -170,13 +170,13 @@ double SoftmaxReg::Cost(std::vector<std::vector<double>> y_hat, std::vector<std:
}
std::vector<double> SoftmaxReg::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.softmax(alg.addition(bias, alg.mat_vec_mult(alg.transpose(weights), x)));
}
std::vector<std::vector<double>> SoftmaxReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.softmax(alg.mat_vec_add(alg.matmult(X, weights), bias));
@ -184,7 +184,7 @@ std::vector<std::vector<double>> SoftmaxReg::Evaluate(std::vector<std::vector<do
// softmax ( wTx + b )
void SoftmaxReg::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
y_hat = avn.softmax(alg.mat_vec_add(alg.matmult(inputSet, weights), bias));


@ -66,7 +66,7 @@ std::vector<double> Stat::mode(const std::vector<double> &x) {
}
double Stat::range(const std::vector<double> &x) {
LinAlg alg;
MLPPLinAlg alg;
return alg.max(x) - alg.min(x);
}


@ -33,7 +33,7 @@ double SVC::modelTest(std::vector<double> x) {
void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -66,7 +66,7 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -109,7 +109,7 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
class MLPPCost cost;
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -164,32 +164,32 @@ double SVC::Cost(std::vector<double> z, std::vector<double> y, std::vector<doubl
}
std::vector<double> SVC::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.sign(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
std::vector<double> SVC::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}
double SVC::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.sign(alg.dot(weights, x) + bias);
}
double SVC::propagate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return alg.dot(weights, x) + bias;
}
// sign ( wTx + b )
void SVC::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z = propagate(inputSet);


@ -32,7 +32,7 @@ double TanhReg::modelTest(std::vector<double> x) {
void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -65,7 +65,7 @@ void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -105,7 +105,7 @@ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
double cost_prev = 0;
int epoch = 1;
@ -163,30 +163,30 @@ double TanhReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
}
std::vector<double> TanhReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.tanh(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
std::vector<double> TanhReg::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}
double TanhReg::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
return avn.tanh(alg.dot(weights, x) + bias);
}
double TanhReg::propagate(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
return alg.dot(weights, x) + bias;
}
// Tanh ( wTx + b )
void TanhReg::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
MLPPActivation avn;
z = propagate(inputSet);


@ -15,7 +15,7 @@
// DCT ii.
// https://www.mathworks.com/help/images/discrete-cosine-transform.html
std::vector<std::vector<double>> Transforms::discreteCosineTransform(std::vector<std::vector<double>> A) {
LinAlg alg;
MLPPLinAlg alg;
A = alg.scalarAdd(-128, A); // Center around 0.
std::vector<std::vector<double>> B;


@ -24,7 +24,7 @@ UniLinReg::UniLinReg(std::vector<double> x, std::vector<double> y) :
}
std::vector<double> UniLinReg::modelSetTest(std::vector<double> x) {
LinAlg alg;
MLPPLinAlg alg;
return alg.scalarAdd(b0, alg.scalarMultiply(b1, x));
}
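A minimal sketch (not part of this commit) of UniLinReg: the constructor fits a line to the (x, y) pairs, and modelSetTest() then evaluates b0 + b1 * x for each input. The header name "uni_lin_reg.h" is an assumption.
#include "uni_lin_reg.h" // assumed header name
#include <iostream>
#include <vector>

int main() {
	std::vector<double> x = { 1, 2, 3 };
	std::vector<double> y = { 2, 4, 6 }; // perfectly linear: y = 2x
	UniLinReg model(x, y);
	for (double y_hat : model.modelSetTest({ 4, 5 })) {
		std::cout << y_hat << " "; // expected: roughly 8 10
	}
	std::cout << std::endl;
	return 0;
}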


@ -24,13 +24,13 @@ WGAN::~WGAN() {
}
std::vector<std::vector<double>> WGAN::generateExample(int n) {
LinAlg alg;
MLPPLinAlg alg;
return modelSetTestGenerator(alg.gaussianNoise(n, k));
}
void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
LinAlg alg;
MLPPLinAlg alg;
double cost_prev = 0;
int epoch = 1;
forwardPass();
@ -86,7 +86,7 @@ void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
double WGAN::score() {
LinAlg alg;
MLPPLinAlg alg;
Utilities util;
forwardPass();
return util.performance(y_hat, alg.onevec(n));
@ -106,7 +106,7 @@ void WGAN::save(std::string fileName) {
}
void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
LinAlg alg;
MLPPLinAlg alg;
if (network.empty()) {
network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
network[0].forwardPass();
@ -117,7 +117,7 @@ void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit
}
void WGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) {
LinAlg alg;
MLPPLinAlg alg;
if (!network.empty()) {
outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
} else { // Should never happen.
@ -169,7 +169,7 @@ double WGAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
}
void WGAN::forwardPass() {
LinAlg alg;
MLPPLinAlg alg;
if (!network.empty()) {
network[0].input = alg.gaussianNoise(n, k);
network[0].forwardPass();
@ -187,7 +187,7 @@ void WGAN::forwardPass() {
}
void WGAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
LinAlg alg;
MLPPLinAlg alg;
outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
@ -204,7 +204,7 @@ void WGAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<dou
}
void WGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, double learning_rate) {
LinAlg alg;
MLPPLinAlg alg;
if (!network.empty()) {
for (int i = network.size() / 2; i >= 0; i--) {
@ -219,7 +219,7 @@ void WGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> WGAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
class MLPPCost cost;
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
@ -255,7 +255,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> W
std::vector<std::vector<std::vector<double>>> WGAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
class MLPPCost cost;
MLPPActivation avn;
LinAlg alg;
MLPPLinAlg alg;
Reg regularization;
std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.