Added Utilities.createMiniBatches, standardized the order of objects in all regression/neural network modules, rebuilt SO

This commit is contained in:
novak_99 2021-06-02 20:37:27 -07:00
parent 3397671049
commit 086b8bea31
25 changed files with 188 additions and 279 deletions

View File

@ -35,9 +35,8 @@ namespace MLPP {
} }
void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI){ void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI){
LinAlg alg;
Activation avn; Activation avn;
LinAlg alg;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -89,9 +88,8 @@ namespace MLPP {
} }
void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI){ void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI){
LinAlg alg;
Activation avn; Activation avn;
Utilities util; LinAlg alg;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -138,33 +136,18 @@ namespace MLPP {
forwardPass(); forwardPass();
} }
void AutoEncoder::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){ void AutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
Activation avn; Activation avn;
LinAlg alg; LinAlg alg;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
int n_miniBatch = n/miniBatch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
// Creating the mini-batches // Creating the mini-batches
for(int i = 0; i < n_miniBatch; i++){ int n_mini_batch = n/mini_batch_size;
std::vector<std::vector<double>> currentInputSet; std::vector<std::vector<std::vector<double>>> inputMiniBatches = Utilities::createMiniBatches(inputSet, n_mini_batch);
for(int j = 0; j < n/n_miniBatch; j++){
currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
}
inputMiniBatches.push_back(currentInputSet);
}
if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
}
}
while(true){ while(true){
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<std::vector<double>> y_hat = Evaluate(inputMiniBatches[i]); std::vector<std::vector<double>> y_hat = Evaluate(inputMiniBatches[i]);
auto [z2, a2] = propagate(inputMiniBatches[i]); auto [z2, a2] = propagate(inputMiniBatches[i]);
cost_prev = Cost(y_hat, inputMiniBatches[i]); cost_prev = Cost(y_hat, inputMiniBatches[i]);

View File

@ -20,7 +20,7 @@ class AutoEncoder{
std::vector<double> modelTest(std::vector<double> x); std::vector<double> modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
void SGD(double learning_rate, int max_epoch, bool UI = 1); void SGD(double learning_rate, int max_epoch, bool UI = 1);
void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1); void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
double score(); double score();
void save(std::string fileName); void save(std::string fileName);

View File

@ -32,9 +32,9 @@ namespace MLPP{
} }
void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI){ void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg;
Activation avn; Activation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -65,9 +65,9 @@ namespace MLPP{
} }
void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI){ void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI){
Reg regularization;
Activation avn; Activation avn;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -134,39 +134,19 @@ namespace MLPP{
forwardPass(); forwardPass();
} }
void CLogLogReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){ void CLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
Reg regularization;
Activation avn; Activation avn;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
int n_miniBatch = n/miniBatch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
std::vector<std::vector<double>> outputMiniBatches;
// Creating the mini-batches // Creating the mini-batches
for(int i = 0; i < n_miniBatch; i++){ int n_mini_batch = n/mini_batch_size;
std::vector<std::vector<double>> currentInputSet; auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
std::vector<double> currentOutputSet;
std::vector<double> currentPreActivationSet;
for(int j = 0; j < n/n_miniBatch; j++){
currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
}
inputMiniBatches.push_back(currentInputSet);
outputMiniBatches.push_back(currentOutputSet);
}
if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
}
}
while(true){ while(true){
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<double> y_hat = Evaluate(inputMiniBatches[i]); std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
std::vector<double> z = propagate(inputMiniBatches[i]); std::vector<double> z = propagate(inputMiniBatches[i]);
cost_prev = Cost(y_hat, outputMiniBatches[i]); cost_prev = Cost(y_hat, outputMiniBatches[i]);

View File

@ -22,7 +22,7 @@ namespace MLPP {
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
void MLE(double learning_rate, int max_epoch, bool UI = 1); void MLE(double learning_rate, int max_epoch, bool UI = 1);
void SGD(double learning_rate, int max_epoch, bool UI = 1); void SGD(double learning_rate, int max_epoch, bool UI = 1);
void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1); void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
double score(); double score();
private: private:

View File

@ -33,8 +33,8 @@ namespace MLPP{
} }
void ExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI){ void ExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -93,7 +93,6 @@ namespace MLPP{
void ExpReg::SGD(double learning_rate, int max_epoch, bool UI){ void ExpReg::SGD(double learning_rate, int max_epoch, bool UI){
Reg regularization; Reg regularization;
Utilities util;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -138,38 +137,18 @@ namespace MLPP{
forwardPass(); forwardPass();
} }
void ExpReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){ void ExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
Reg regularization;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
int n_miniBatch = n/miniBatch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
std::vector<std::vector<double>> outputMiniBatches;
// Creating the mini-batches // Creating the mini-batches
for(int i = 0; i < n_miniBatch; i++){ int n_mini_batch = n/mini_batch_size;
std::vector<std::vector<double>> currentInputSet; auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
std::vector<double> currentOutputSet;
for(int j = 0; j < n/n_miniBatch; j++){
currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
}
inputMiniBatches.push_back(currentInputSet);
outputMiniBatches.push_back(currentOutputSet);
}
if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
}
}
while(true){ while(true){
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<double> y_hat = Evaluate(inputMiniBatches[i]); std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
cost_prev = Cost(y_hat, outputMiniBatches[i]); cost_prev = Cost(y_hat, outputMiniBatches[i]);
std::vector<double> error = alg.subtraction(y_hat, outputMiniBatches[i]); std::vector<double> error = alg.subtraction(y_hat, outputMiniBatches[i]);

View File

@ -19,7 +19,7 @@ namespace MLPP{
double modelTest(std::vector<double> x); double modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
void SGD(double learning_rate, int max_epoch, bool UI = 1); void SGD(double learning_rate, int max_epoch, bool UI = 1);
void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1); void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
double score(); double score();
void save(std::string fileName); void save(std::string fileName);
private: private:

View File

@ -35,8 +35,8 @@ namespace MLPP{
} }
void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI){ void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -67,7 +67,6 @@ namespace MLPP{
void LinReg::SGD(double learning_rate, int max_epoch, bool UI){ void LinReg::SGD(double learning_rate, int max_epoch, bool UI){
LinAlg alg; LinAlg alg;
Reg regularization; Reg regularization;
Utilities util;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -102,37 +101,18 @@ namespace MLPP{
forwardPass(); forwardPass();
} }
void LinReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){ void LinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
Reg regularization;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
int n_miniBatch = n/miniBatch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
std::vector<std::vector<double>> outputMiniBatches;
// Creating the mini-batches // Creating the mini-batches
for(int i = 0; i < n_miniBatch; i++){ int n_mini_batch = n/mini_batch_size;
std::vector<std::vector<double>> currentInputSet; auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
std::vector<double> currentOutputSet;
for(int j = 0; j < n/n_miniBatch; j++){
currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
}
inputMiniBatches.push_back(currentInputSet);
outputMiniBatches.push_back(currentOutputSet);
}
if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
}
}
while(true){ while(true){
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<double> y_hat = Evaluate(inputMiniBatches[i]); std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
cost_prev = Cost(y_hat, outputMiniBatches[i]); cost_prev = Cost(y_hat, outputMiniBatches[i]);

View File

@ -19,7 +19,7 @@ namespace MLPP{
double modelTest(std::vector<double> x); double modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
void SGD(double learning_rate, int max_epoch, bool UI = 1); void SGD(double learning_rate, int max_epoch, bool UI = 1);
void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1); void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void normalEquation(); void normalEquation();
double score(); double score();
void save(std::string fileName); void save(std::string fileName);

View File

@ -32,8 +32,8 @@ namespace MLPP{
} }
void LogReg::gradientDescent(double learning_rate, int max_epoch, bool UI){ void LogReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -63,8 +63,8 @@ namespace MLPP{
} }
void LogReg::MLE(double learning_rate, int max_epoch, bool UI){ void LogReg::MLE(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -94,7 +94,6 @@ namespace MLPP{
void LogReg::SGD(double learning_rate, int max_epoch, bool UI){ void LogReg::SGD(double learning_rate, int max_epoch, bool UI){
LinAlg alg; LinAlg alg;
Reg regularization; Reg regularization;
Utilities util;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -129,38 +128,18 @@ namespace MLPP{
forwardPass(); forwardPass();
} }
void LogReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){ void LogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
Reg regularization;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
int n_miniBatch = n/miniBatch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
std::vector<std::vector<double>> outputMiniBatches;
// Creating the mini-batches // Creating the mini-batches
for(int i = 0; i < n_miniBatch; i++){ int n_mini_batch = n/mini_batch_size;
std::vector<std::vector<double>> currentInputSet; auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
std::vector<double> currentOutputSet;
for(int j = 0; j < n/n_miniBatch; j++){
currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
}
inputMiniBatches.push_back(currentInputSet);
outputMiniBatches.push_back(currentOutputSet);
}
if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
}
}
while(true){ while(true){
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<double> y_hat = Evaluate(inputMiniBatches[i]); std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
cost_prev = Cost(y_hat, outputMiniBatches[i]); cost_prev = Cost(y_hat, outputMiniBatches[i]);

View File

@ -22,7 +22,7 @@ namespace MLPP {
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
void MLE(double learning_rate, int max_epoch, bool UI = 1); void MLE(double learning_rate, int max_epoch, bool UI = 1);
void SGD(double learning_rate, int max_epoch, bool UI = 1); void SGD(double learning_rate, int max_epoch, bool UI = 1);
void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1); void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
double score(); double score();
void save(std::string fileName); void save(std::string fileName);
private: private:

View File

@ -36,10 +36,9 @@ namespace MLPP {
} }
void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI){ void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg;
Activation avn; Activation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -60,7 +59,7 @@ namespace MLPP {
bias2 -= learning_rate * alg.sum_elements(error) / n; bias2 -= learning_rate * alg.sum_elements(error) / n;
//Calculating the weight/bias for layer 1 // Calculating the weight/bias for layer 1
std::vector<std::vector<double>> D1_1; std::vector<std::vector<double>> D1_1;
D1_1.resize(n); D1_1.resize(n);
@ -96,13 +95,12 @@ namespace MLPP {
} }
void MLP::SGD(double learning_rate, int max_epoch, bool UI){ void MLP::SGD(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg;
Activation avn; Activation avn;
Utilities util; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
while(true){ while(true){
std::random_device rd; std::random_device rd;
std::default_random_engine generator(rd()); std::default_random_engine generator(rd());
@ -148,39 +146,19 @@ namespace MLPP {
forwardPass(); forwardPass();
} }
void MLP::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){ void MLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
Reg regularization;
Activation avn; Activation avn;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
int n_miniBatch = n/miniBatch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
std::vector<std::vector<double>> outputMiniBatches;
// Creating the mini-batches // Creating the mini-batches
for(int i = 0; i < n_miniBatch; i++){ int n_mini_batch = n/mini_batch_size;
std::vector<std::vector<double>> currentInputSet; auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
std::vector<double> currentOutputSet;
for(int j = 0; j < n/n_miniBatch; j++){
currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
}
inputMiniBatches.push_back(currentInputSet);
outputMiniBatches.push_back(currentOutputSet);
}
if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
}
}
while(true){ while(true){
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<double> y_hat = Evaluate(inputMiniBatches[i]); std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
auto [z2, a2] = propagate(inputMiniBatches[i]); auto [z2, a2] = propagate(inputMiniBatches[i]);
cost_prev = Cost(y_hat, outputMiniBatches[i]); cost_prev = Cost(y_hat, outputMiniBatches[i]);

View File

@ -20,7 +20,7 @@ class MLP{
double modelTest(std::vector<double> x); double modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
void SGD(double learning_rate, int max_epoch, bool UI = 1); void SGD(double learning_rate, int max_epoch, bool UI = 1);
void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1); void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
double score(); double score();
void save(std::string fileName); void save(std::string fileName);

View File

@ -32,9 +32,9 @@ namespace MLPP{
} }
void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI){ void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
Reg regularization;
Activation avn; Activation avn;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -64,8 +64,8 @@ namespace MLPP{
void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI){ void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI){
Activation avn; Activation avn;
Reg regularization;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -95,10 +95,9 @@ namespace MLPP{
void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI){ void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI){
// NOTE: ∂y_hat/∂z is sparse // NOTE: ∂y_hat/∂z is sparse
LinAlg alg;
Activation avn; Activation avn;
LinAlg alg;
Reg regularization; Reg regularization;
Utilities util;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -134,39 +133,38 @@ namespace MLPP{
forwardPass(); forwardPass();
} }
void ProbitReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){ void ProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
Reg regularization;
Activation avn; Activation avn;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
int n_miniBatch = n/miniBatch_size; // Creating the mini-batches
int n_mini_batch = n/mini_batch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches; auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
std::vector<std::vector<double>> outputMiniBatches;
// Creating the mini-batches // Creating the mini-batches
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<std::vector<double>> currentInputSet; std::vector<std::vector<double>> currentInputSet;
std::vector<double> currentOutputSet; std::vector<double> currentOutputSet;
for(int j = 0; j < n/n_miniBatch; j++){ for(int j = 0; j < n/n_mini_batch; j++){
currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]); currentInputSet.push_back(inputSet[n/n_mini_batch * i + j]);
currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]); currentOutputSet.push_back(outputSet[n/n_mini_batch * i + j]);
} }
inputMiniBatches.push_back(currentInputSet); inputMiniBatches.push_back(currentInputSet);
outputMiniBatches.push_back(currentOutputSet); outputMiniBatches.push_back(currentOutputSet);
} }
if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){ if(double(n)/double(n_mini_batch) - int(n/n_mini_batch) != 0){
for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){ for(int i = 0; i < n - n/n_mini_batch * n_mini_batch; i++){
inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]); inputMiniBatches[n_mini_batch - 1].push_back(inputSet[n/n_mini_batch * n_mini_batch + i]);
outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]); outputMiniBatches[n_mini_batch - 1].push_back(outputSet[n/n_mini_batch * n_mini_batch + i]);
} }
} }
while(true){ while(true){
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<double> y_hat = Evaluate(inputMiniBatches[i]); std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
std::vector<double> z = propagate(inputMiniBatches[i]); std::vector<double> z = propagate(inputMiniBatches[i]);
cost_prev = Cost(y_hat, outputMiniBatches[i]); cost_prev = Cost(y_hat, outputMiniBatches[i]);

View File

@ -22,7 +22,7 @@ namespace MLPP {
void gradientDescent(double learning_rate, int max_epoch = 0, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch = 0, bool UI = 1);
void MLE(double learning_rate, int max_epoch = 0, bool UI = 1); void MLE(double learning_rate, int max_epoch = 0, bool UI = 1);
void SGD(double learning_rate, int max_epoch = 0, bool UI = 1); void SGD(double learning_rate, int max_epoch = 0, bool UI = 1);
void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1); void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
double score(); double score();
void save(std::string fileName); void save(std::string fileName);
private: private:

View File

@ -36,11 +36,9 @@ namespace MLPP{
} }
void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI){ void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg;
Activation avn; Activation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -94,13 +92,12 @@ namespace MLPP{
} }
void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI){ void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg;
Activation avn; Activation avn;
Utilities util; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
while(true){ while(true){
std::random_device rd; std::random_device rd;
std::default_random_engine generator(rd()); std::default_random_engine generator(rd());
@ -146,39 +143,38 @@ namespace MLPP{
forwardPass(); forwardPass();
} }
void SoftmaxNet::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){ void SoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
Reg regularization;
Activation avn; Activation avn;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
int n_miniBatch = n/miniBatch_size; // Creating the mini-batches
int n_mini_batch = n/mini_batch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches; auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
std::vector<std::vector<std::vector<double>>> outputMiniBatches;
//Creating the mini-batches // Creating the mini-batches
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<std::vector<double>> currentInputSet; std::vector<std::vector<double>> currentInputSet;
std::vector<std::vector<double>> currentOutputSet; std::vector<std::vector<double>> currentOutputSet;
for(int j = 0; j < n/n_miniBatch; j++){ for(int j = 0; j < n/n_mini_batch; j++){
currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]); currentInputSet.push_back(inputSet[n/n_mini_batch * i + j]);
currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]); currentOutputSet.push_back(outputSet[n/n_mini_batch * i + j]);
} }
inputMiniBatches.push_back(currentInputSet); inputMiniBatches.push_back(currentInputSet);
outputMiniBatches.push_back(currentOutputSet); outputMiniBatches.push_back(currentOutputSet);
} }
if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){ if(double(n)/double(n_mini_batch) - int(n/n_mini_batch) != 0){
for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){ for(int i = 0; i < n - n/n_mini_batch * n_mini_batch; i++){
inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]); inputMiniBatches[n_mini_batch - 1].push_back(inputSet[n/n_mini_batch * n_mini_batch + i]);
outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]); outputMiniBatches[n_mini_batch - 1].push_back(outputSet[n/n_mini_batch * n_mini_batch + i]);
} }
} }
while(true){ while(true){
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<std::vector<double>> y_hat = Evaluate(inputMiniBatches[i]); std::vector<std::vector<double>> y_hat = Evaluate(inputMiniBatches[i]);
auto [z2, a2] = propagate(inputMiniBatches[i]); auto [z2, a2] = propagate(inputMiniBatches[i]);
cost_prev = Cost(y_hat, outputMiniBatches[i]); cost_prev = Cost(y_hat, outputMiniBatches[i]);

View File

@ -21,7 +21,7 @@ namespace MLPP {
std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X); std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
void SGD(double learning_rate, int max_epoch, bool UI = 1); void SGD(double learning_rate, int max_epoch, bool UI = 1);
void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1); void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
double score(); double score();
void save(std::string fileName); void save(std::string fileName);

View File

@ -33,8 +33,8 @@ namespace MLPP{
} }
void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI){ void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -72,9 +72,8 @@ namespace MLPP{
} }
void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI){ void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg; LinAlg alg;
Utilities util; Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -114,38 +113,18 @@ namespace MLPP{
} }
void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){ void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
Reg regularization;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
int n_miniBatch = n/miniBatch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
std::vector<std::vector<std::vector<double>>> outputMiniBatches;
// Creating the mini-batches // Creating the mini-batches
for(int i = 0; i < n_miniBatch; i++){ int n_mini_batch = n/mini_batch_size;
std::vector<std::vector<double>> currentInputSet; auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
std::vector<std::vector<double>> currentOutputSet;
for(int j = 0; j < n/n_miniBatch; j++){
currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
}
inputMiniBatches.push_back(currentInputSet);
outputMiniBatches.push_back(currentOutputSet);
}
if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
}
}
while(true){ while(true){
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<std::vector<double>> y_hat = Evaluate(inputMiniBatches[i]); std::vector<std::vector<double>> y_hat = Evaluate(inputMiniBatches[i]);
cost_prev = Cost(y_hat, outputMiniBatches[i]); cost_prev = Cost(y_hat, outputMiniBatches[i]);

View File

@ -21,7 +21,7 @@ namespace MLPP {
std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X); std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
void SGD(double learning_rate, int max_epoch, bool UI = 1); void SGD(double learning_rate, int max_epoch, bool UI = 1);
void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1); void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
double score(); double score();
void save(std::string fileName); void save(std::string fileName);
private: private:

View File

@ -32,9 +32,9 @@ namespace MLPP{
} }
void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI){ void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
Reg regularization;
LinAlg alg;
Activation avn; Activation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -68,7 +68,6 @@ namespace MLPP{
void TanhReg::SGD(double learning_rate, int max_epoch, bool UI){ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI){
LinAlg alg; LinAlg alg;
Reg regularization; Reg regularization;
Utilities util;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -103,39 +102,19 @@ namespace MLPP{
forwardPass(); forwardPass();
} }
void TanhReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){ void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
Reg regularization;
Activation avn; Activation avn;
LinAlg alg; LinAlg alg;
Reg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
int n_miniBatch = n/miniBatch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
std::vector<std::vector<double>> outputMiniBatches;
// Creating the mini-batches // Creating the mini-batches
for(int i = 0; i < n_miniBatch; i++){ int n_mini_batch = n/mini_batch_size;
std::vector<std::vector<double>> currentInputSet; auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
std::vector<double> currentOutputSet;
std::vector<double> currentPreActivationSet;
for(int j = 0; j < n/n_miniBatch; j++){
currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
}
inputMiniBatches.push_back(currentInputSet);
outputMiniBatches.push_back(currentOutputSet);
}
if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
}
}
while(true){ while(true){
for(int i = 0; i < n_miniBatch; i++){ for(int i = 0; i < n_mini_batch; i++){
std::vector<double> y_hat = Evaluate(inputMiniBatches[i]); std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
std::vector<double> z = propagate(inputMiniBatches[i]); std::vector<double> z = propagate(inputMiniBatches[i]);
cost_prev = Cost(y_hat, outputMiniBatches[i]); cost_prev = Cost(y_hat, outputMiniBatches[i]);

View File

@ -21,7 +21,7 @@ namespace MLPP {
double modelTest(std::vector<double> x); double modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
void SGD(double learning_rate, int max_epoch, bool UI = 1); void SGD(double learning_rate, int max_epoch, bool UI = 1);
void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1); void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
double score(); double score();
void save(std::string fileName); void save(std::string fileName);
private: private:

View File

@ -264,6 +264,80 @@ namespace MLPP{
std::cout << Cost << std::endl; std::cout << Cost << std::endl;
} }
std::vector<std::vector<std::vector<double>>> Utilities::createMiniBatches(std::vector<std::vector<double>> inputSet, int n_mini_batch){
    // Splits inputSet into n_mini_batch contiguous mini-batches of equal size;
    // when the sample count is not evenly divisible, the leftover samples are
    // appended to the final mini-batch.
    int n = inputSet.size();
    int batch_size = n / n_mini_batch; // samples per (full) mini-batch
    std::vector<std::vector<std::vector<double>>> inputMiniBatches;
    inputMiniBatches.reserve(n_mini_batch);
    for(int b = 0; b < n_mini_batch; b++){
        auto first = inputSet.begin() + b * batch_size;
        inputMiniBatches.emplace_back(first, first + batch_size);
    }
    // Fold any remainder into the last batch so no sample is dropped.
    int consumed = batch_size * n_mini_batch;
    if(consumed != n){
        inputMiniBatches[n_mini_batch - 1].insert(inputMiniBatches[n_mini_batch - 1].end(), inputSet.begin() + consumed, inputSet.end());
    }
    return inputMiniBatches;
}
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<double>>> Utilities::createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_mini_batch){
    // Splits a paired (inputSet, outputSet) dataset into n_mini_batch contiguous
    // mini-batches, keeping input/output rows aligned. Leftover samples (when the
    // size is not evenly divisible) are appended to the last mini-batch.
    int n = inputSet.size();
    int batch_size = n / n_mini_batch; // samples per (full) mini-batch
    std::vector<std::vector<std::vector<double>>> inputMiniBatches;
    std::vector<std::vector<double>> outputMiniBatches;
    inputMiniBatches.reserve(n_mini_batch);
    outputMiniBatches.reserve(n_mini_batch);
    for(int b = 0; b < n_mini_batch; b++){
        int start = b * batch_size;
        inputMiniBatches.emplace_back(inputSet.begin() + start, inputSet.begin() + start + batch_size);
        outputMiniBatches.emplace_back(outputSet.begin() + start, outputSet.begin() + start + batch_size);
    }
    // Fold any remainder into the last batch so no sample is dropped.
    int consumed = batch_size * n_mini_batch;
    if(consumed != n){
        inputMiniBatches[n_mini_batch - 1].insert(inputMiniBatches[n_mini_batch - 1].end(), inputSet.begin() + consumed, inputSet.end());
        outputMiniBatches[n_mini_batch - 1].insert(outputMiniBatches[n_mini_batch - 1].end(), outputSet.begin() + consumed, outputSet.end());
    }
    return {inputMiniBatches, outputMiniBatches};
}
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<std::vector<double>>>> Utilities::createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_mini_batch){
    // Splits a paired (inputSet, outputSet) dataset — both matrices — into
    // n_mini_batch contiguous mini-batches, keeping input/output rows aligned.
    // Leftover samples (when the size is not evenly divisible) go into the last batch.
    int n = inputSet.size();
    int batch_size = n / n_mini_batch; // samples per (full) mini-batch
    std::vector<std::vector<std::vector<double>>> inputMiniBatches;
    std::vector<std::vector<std::vector<double>>> outputMiniBatches;
    inputMiniBatches.reserve(n_mini_batch);
    outputMiniBatches.reserve(n_mini_batch);
    for(int b = 0; b < n_mini_batch; b++){
        int start = b * batch_size;
        inputMiniBatches.emplace_back(inputSet.begin() + start, inputSet.begin() + start + batch_size);
        outputMiniBatches.emplace_back(outputSet.begin() + start, outputSet.begin() + start + batch_size);
    }
    // Fold any remainder into the last batch so no sample is dropped.
    int consumed = batch_size * n_mini_batch;
    if(consumed != n){
        inputMiniBatches[n_mini_batch - 1].insert(inputMiniBatches[n_mini_batch - 1].end(), inputSet.begin() + consumed, inputSet.end());
        outputMiniBatches[n_mini_batch - 1].insert(outputMiniBatches[n_mini_batch - 1].end(), outputSet.begin() + consumed, outputSet.end());
    }
    return {inputMiniBatches, outputMiniBatches};
}
std::tuple<double, double, double, double> Utilities::TF_PN(std::vector<double> y_hat, std::vector<double> y){ std::tuple<double, double, double, double> Utilities::TF_PN(std::vector<double> y_hat, std::vector<double> y){
double TP, FP, TN, FN = 0; double TP, FP, TN, FN = 0;
for(int i = 0; i < y_hat.size(); i++){ for(int i = 0; i < y_hat.size(); i++){

View File

@ -36,6 +36,10 @@ namespace MLPP{
static void UI(std::vector<std::vector<double>>, std::vector<double> bias); static void UI(std::vector<std::vector<double>>, std::vector<double> bias);
static void CostInfo(int epoch, double cost_prev, double Cost); static void CostInfo(int epoch, double cost_prev, double Cost);
static std::vector<std::vector<std::vector<double>>> createMiniBatches(std::vector<std::vector<double>> inputSet, int n_mini_batch);
static std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<double>>> createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_mini_batch);
static std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<std::vector<double>>>> createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_mini_batch);
// F1 score, Precision/Recall, TP, FP, TN, FN, etc. // F1 score, Precision/Recall, TP, FP, TN, FN, etc.
std::tuple<double, double, double, double> TF_PN(std::vector<double> y_hat, std::vector<double> y); //TF_PN = "True", "False", "Positive", "Negative" std::tuple<double, double, double, double> TF_PN(std::vector<double> y_hat, std::vector<double> y); //TF_PN = "True", "False", "Positive", "Negative"
double recall(std::vector<double> y_hat, std::vector<double> y); double recall(std::vector<double> y_hat, std::vector<double> y);

Binary file not shown.

BIN
a.out

Binary file not shown.

View File

@ -135,7 +135,7 @@ int main() {
// std::vector<std::vector<double>> inputSet = {{1,2,3,4,5,6,7,8,9,10}, {3,5,9,12,15,18,21,24,27,30}}; // std::vector<std::vector<double>> inputSet = {{1,2,3,4,5,6,7,8,9,10}, {3,5,9,12,15,18,21,24,27,30}};
// std::vector<double> outputSet = {2,4,6,8,10,12,14,16,18,20}; // std::vector<double> outputSet = {2,4,6,8,10,12,14,16,18,20};
// LinReg model(alg.transpose(inputSet), outputSet); // Can use Lasso, Ridge, ElasticNet Reg // LinReg model(alg.transpose(inputSet), outputSet); // Can use Lasso, Ridge, ElasticNet Reg
//model.normalEquation(); // model.normalEquation();
// model.gradientDescent(0.001, 30000, 1); // model.gradientDescent(0.001, 30000, 1);
// model.SGD(0.001, 30000, 1); // model.SGD(0.001, 30000, 1);
// model.MBGD(0.001, 10000, 2, 1); // model.MBGD(0.001, 10000, 2, 1);
@ -368,10 +368,10 @@ int main() {
// alg.printMatrix(alg.matrixPower({{5,5},{5,5}}, 2)); // alg.printMatrix(alg.matrixPower({{5,5},{5,5}}, 2));
// alg.printVector(alg.solve({{1,1}, {1.5, 4.0}}, {2200, 5050})); // alg.printVector(alg.solve({{1,1}, {1.5, 4.0}}, {2200, 5050}));
std::vector<std::vector<double>> matrixOfCubes = {{1,2,64,27}}; // std::vector<std::vector<double>> matrixOfCubes = {{1,2,64,27}};
std::vector<double> vectorOfCubes = {1,2,64,27}; // std::vector<double> vectorOfCubes = {1,2,64,27};
alg.printMatrix(alg.cbrt(matrixOfCubes)); // alg.printMatrix(alg.cbrt(matrixOfCubes));
alg.printVector(alg.cbrt(vectorOfCubes)); // alg.printVector(alg.cbrt(vectorOfCubes));
return 0; return 0;
} }