Mirror of https://github.com/Relintai/MLPP.git (synced 2024-11-12 10:15:01 +01:00)
Added Utilities.createMiniBatches, standardized the order of objects in all regression/neural network modules, rebuilt SO
This commit is contained in:
parent 3397671049
commit 086b8bea31
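In short: every regression/neural-network module's MBGD method used to build its mini-batches inline; this commit moves that logic into three static Utilities::createMiniBatches overloads, has the call sites consume them, renames miniBatch_size to mini_batch_size, and standardizes the order of the helper objects (Activation, LinAlg, Reg) at the top of each method. The standalone sketch below reproduces the batching scheme the new utility centralizes, so the diffs that follow are easier to read: each of the n_mini_batch batches receives n/n_mini_batch samples (integer division), and any remainder is appended to the last batch. It is an illustrative, self-contained copy of the unsupervised overload, not the library code itself.

// Minimal sketch of the batching scheme (mirrors the unsupervised overload added in Utilities.cpp below).
#include <iostream>
#include <vector>

std::vector<std::vector<std::vector<double>>> createMiniBatches(std::vector<std::vector<double>> inputSet, int n_mini_batch){
    int n = inputSet.size();
    std::vector<std::vector<std::vector<double>>> inputMiniBatches;
    // Each batch gets n/n_mini_batch rows (integer division).
    for(int i = 0; i < n_mini_batch; i++){
        std::vector<std::vector<double>> currentInputSet;
        for(int j = 0; j < n/n_mini_batch; j++){
            currentInputSet.push_back(inputSet[n/n_mini_batch * i + j]);
        }
        inputMiniBatches.push_back(currentInputSet);
    }
    // Leftover rows (when n_mini_batch does not divide n) go into the last batch.
    if(double(n)/double(n_mini_batch) - int(n/n_mini_batch) != 0){
        for(int i = 0; i < n - n/n_mini_batch * n_mini_batch; i++){
            inputMiniBatches[n_mini_batch - 1].push_back(inputSet[n/n_mini_batch * n_mini_batch + i]);
        }
    }
    return inputMiniBatches;
}

int main(){
    std::vector<std::vector<double>> inputSet(10, std::vector<double>{0.0}); // 10 samples
    auto batches = createMiniBatches(inputSet, 3);
    for(auto& batch : batches){
        std::cout << batch.size() << std::endl; // prints 3, 3, 4
    }
    return 0;
}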
AutoEncoder.cpp
@@ -35,9 +35,8 @@ namespace MLPP {
     }
 
     void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI){
-        LinAlg alg;
         Activation avn;
-
+        LinAlg alg;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -89,9 +88,8 @@ namespace MLPP {
     }
 
     void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI){
-        LinAlg alg;
         Activation avn;
-        Utilities util;
+        LinAlg alg;
         double cost_prev = 0;
         int epoch = 1;
 
@@ -138,33 +136,18 @@ namespace MLPP {
         forwardPass();
     }
 
-    void AutoEncoder::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){
+    void AutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
         Activation avn;
         LinAlg alg;
         double cost_prev = 0;
         int epoch = 1;
 
-        int n_miniBatch = n/miniBatch_size;
-
-        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
-
-        // Creating the mini-batches
-        for(int i = 0; i < n_miniBatch; i++){
-            std::vector<std::vector<double>> currentInputSet;
-            for(int j = 0; j < n/n_miniBatch; j++){
-                currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
-            }
-            inputMiniBatches.push_back(currentInputSet);
-        }
+        int n_mini_batch = n/mini_batch_size;
+        std::vector<std::vector<std::vector<double>>> inputMiniBatches = Utilities::createMiniBatches(inputSet, n_mini_batch);
 
-        if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
-            for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
-                inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
-            }
-        }
 
         while(true){
-            for(int i = 0; i < n_miniBatch; i++){
+            for(int i = 0; i < n_mini_batch; i++){
                 std::vector<std::vector<double>> y_hat = Evaluate(inputMiniBatches[i]);
                 auto [z2, a2] = propagate(inputMiniBatches[i]);
                 cost_prev = Cost(y_hat, inputMiniBatches[i]);
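The same two-line replacement repeats in each module below (CLogLogReg, ExpReg, LinReg, LogReg, MLP, ProbitReg, SoftmaxNet, SoftmaxReg, TanhReg); for the supervised models the net effect on each MBGD body is:

int n_mini_batch = n/mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);

Note, however, that ProbitReg::MBGD and SoftmaxNet::MBGD below keep their renamed inline creation loops after the new call, so those two methods build their batches twice.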
AutoEncoder.hpp
@@ -20,7 +20,7 @@ class AutoEncoder{
         std::vector<double> modelTest(std::vector<double> x);
         void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
         void SGD(double learning_rate, int max_epoch, bool UI = 1);
-        void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1);
+        void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
         double score();
         void save(std::string fileName);
CLogLogReg.cpp
@@ -32,9 +32,9 @@ namespace MLPP{
     }
 
     void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
-        LinAlg alg;
         Activation avn;
+        LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -65,9 +65,9 @@ namespace MLPP{
     }
 
     void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
         Activation avn;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -134,39 +134,19 @@ namespace MLPP{
         forwardPass();
     }
 
-    void CLogLogReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){
-        Reg regularization;
+    void CLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
         Activation avn;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
-        int n_miniBatch = n/miniBatch_size;
-
-        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
-        std::vector<std::vector<double>> outputMiniBatches;
-        // Creating the mini-batches
-        for(int i = 0; i < n_miniBatch; i++){
-            std::vector<std::vector<double>> currentInputSet;
-            std::vector<double> currentOutputSet;
-            std::vector<double> currentPreActivationSet;
-            for(int j = 0; j < n/n_miniBatch; j++){
-                currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
-                currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
-            }
-            inputMiniBatches.push_back(currentInputSet);
-            outputMiniBatches.push_back(currentOutputSet);
-        }
-
-        if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
-            for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
-                inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
-                outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
-            }
-        }
+        int n_mini_batch = n/mini_batch_size;
+        auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
 
         while(true){
-            for(int i = 0; i < n_miniBatch; i++){
+            for(int i = 0; i < n_mini_batch; i++){
                 std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
                 std::vector<double> z = propagate(inputMiniBatches[i]);
                 cost_prev = Cost(y_hat, outputMiniBatches[i]);
CLogLogReg.hpp
@@ -22,7 +22,7 @@ namespace MLPP {
         void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
         void MLE(double learning_rate, int max_epoch, bool UI = 1);
         void SGD(double learning_rate, int max_epoch, bool UI = 1);
-        void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1);
+        void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
         double score();
     private:
 
ExpReg.cpp
@@ -33,8 +33,8 @@ namespace MLPP{
     }
 
     void ExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -93,7 +93,6 @@ namespace MLPP{
 
     void ExpReg::SGD(double learning_rate, int max_epoch, bool UI){
         Reg regularization;
-        Utilities util;
         double cost_prev = 0;
         int epoch = 1;
 
@@ -138,38 +137,18 @@ namespace MLPP{
         forwardPass();
     }
 
-    void ExpReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){
-        Reg regularization;
+    void ExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
-        int n_miniBatch = n/miniBatch_size;
-
-        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
-        std::vector<std::vector<double>> outputMiniBatches;
-
-        // Creating the mini-batches
-        for(int i = 0; i < n_miniBatch; i++){
-            std::vector<std::vector<double>> currentInputSet;
-            std::vector<double> currentOutputSet;
-            for(int j = 0; j < n/n_miniBatch; j++){
-                currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
-                currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
-            }
-            inputMiniBatches.push_back(currentInputSet);
-            outputMiniBatches.push_back(currentOutputSet);
-        }
-
-        if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
-            for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
-                inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
-                outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
-            }
-        }
+        int n_mini_batch = n/mini_batch_size;
+        auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
 
         while(true){
-            for(int i = 0; i < n_miniBatch; i++){
+            for(int i = 0; i < n_mini_batch; i++){
                 std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
                 cost_prev = Cost(y_hat, outputMiniBatches[i]);
                 std::vector<double> error = alg.subtraction(y_hat, outputMiniBatches[i]);
ExpReg.hpp
@@ -19,7 +19,7 @@ namespace MLPP{
         double modelTest(std::vector<double> x);
         void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
         void SGD(double learning_rate, int max_epoch, bool UI = 1);
-        void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1);
+        void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
         double score();
         void save(std::string fileName);
     private:
LinReg.cpp
@@ -35,8 +35,8 @@ namespace MLPP{
     }
 
     void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -67,7 +67,6 @@ namespace MLPP{
     void LinReg::SGD(double learning_rate, int max_epoch, bool UI){
         LinAlg alg;
         Reg regularization;
-        Utilities util;
         double cost_prev = 0;
         int epoch = 1;
 
@@ -102,37 +101,18 @@ namespace MLPP{
         forwardPass();
     }
 
-    void LinReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){
-        Reg regularization;
+    void LinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
-        int n_miniBatch = n/miniBatch_size;
-
-        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
-        std::vector<std::vector<double>> outputMiniBatches;
-        // Creating the mini-batches
-        for(int i = 0; i < n_miniBatch; i++){
-            std::vector<std::vector<double>> currentInputSet;
-            std::vector<double> currentOutputSet;
-            for(int j = 0; j < n/n_miniBatch; j++){
-                currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
-                currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
-            }
-            inputMiniBatches.push_back(currentInputSet);
-            outputMiniBatches.push_back(currentOutputSet);
-        }
+        int n_mini_batch = n/mini_batch_size;
+        auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
 
-        if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
-            for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
-                inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
-                outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
-            }
-        }
 
         while(true){
-            for(int i = 0; i < n_miniBatch; i++){
+            for(int i = 0; i < n_mini_batch; i++){
                 std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
                 cost_prev = Cost(y_hat, outputMiniBatches[i]);
 
LinReg.hpp
@@ -19,7 +19,7 @@ namespace MLPP{
         double modelTest(std::vector<double> x);
         void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
         void SGD(double learning_rate, int max_epoch, bool UI = 1);
-        void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1);
+        void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
         void normalEquation();
         double score();
         void save(std::string fileName);
LogReg.cpp
@@ -32,8 +32,8 @@ namespace MLPP{
     }
 
     void LogReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -63,8 +63,8 @@ namespace MLPP{
     }
 
     void LogReg::MLE(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -94,7 +94,6 @@ namespace MLPP{
     void LogReg::SGD(double learning_rate, int max_epoch, bool UI){
         LinAlg alg;
         Reg regularization;
-        Utilities util;
         double cost_prev = 0;
         int epoch = 1;
 
@@ -129,38 +128,18 @@ namespace MLPP{
         forwardPass();
     }
 
-    void LogReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){
-        Reg regularization;
+    void LogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
-        int n_miniBatch = n/miniBatch_size;
-
-        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
-        std::vector<std::vector<double>> outputMiniBatches;
-
-        // Creating the mini-batches
-        for(int i = 0; i < n_miniBatch; i++){
-            std::vector<std::vector<double>> currentInputSet;
-            std::vector<double> currentOutputSet;
-            for(int j = 0; j < n/n_miniBatch; j++){
-                currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
-                currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
-            }
-            inputMiniBatches.push_back(currentInputSet);
-            outputMiniBatches.push_back(currentOutputSet);
-        }
-
-        if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
-            for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
-                inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
-                outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
-            }
-        }
+        int n_mini_batch = n/mini_batch_size;
+        auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
 
         while(true){
-            for(int i = 0; i < n_miniBatch; i++){
+            for(int i = 0; i < n_mini_batch; i++){
                 std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
                 cost_prev = Cost(y_hat, outputMiniBatches[i]);
 
LogReg.hpp
@@ -22,7 +22,7 @@ namespace MLPP {
         void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
         void MLE(double learning_rate, int max_epoch, bool UI = 1);
         void SGD(double learning_rate, int max_epoch, bool UI = 1);
-        void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1);
+        void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
         double score();
         void save(std::string fileName);
     private:
MLP.cpp
@@ -36,10 +36,9 @@ namespace MLPP {
     }
 
     void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
-        LinAlg alg;
         Activation avn;
-
+        LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -60,7 +59,7 @@ namespace MLPP {
 
         bias2 -= learning_rate * alg.sum_elements(error) / n;
 
-        //Calculating the weight/bias for layer 1
+        // Calculating the weight/bias for layer 1
 
         std::vector<std::vector<double>> D1_1;
         D1_1.resize(n);
@@ -96,13 +95,12 @@ namespace MLPP {
     }
 
     void MLP::SGD(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
-        LinAlg alg;
         Activation avn;
-        Utilities util;
 
+        LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
         while(true){
             std::random_device rd;
             std::default_random_engine generator(rd());
@@ -148,39 +146,19 @@ namespace MLPP {
         forwardPass();
     }
 
-    void MLP::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){
-        Reg regularization;
+    void MLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
         Activation avn;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
-        int n_miniBatch = n/miniBatch_size;
-
-        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
-        std::vector<std::vector<double>> outputMiniBatches;
-
-        // Creating the mini-batches
-        for(int i = 0; i < n_miniBatch; i++){
-            std::vector<std::vector<double>> currentInputSet;
-            std::vector<double> currentOutputSet;
-            for(int j = 0; j < n/n_miniBatch; j++){
-                currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
-                currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
-            }
-            inputMiniBatches.push_back(currentInputSet);
-            outputMiniBatches.push_back(currentOutputSet);
-        }
-
-        if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
-            for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
-                inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
-                outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
-            }
-        }
+        int n_mini_batch = n/mini_batch_size;
+        auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
 
         while(true){
-            for(int i = 0; i < n_miniBatch; i++){
+            for(int i = 0; i < n_mini_batch; i++){
                 std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
                 auto [z2, a2] = propagate(inputMiniBatches[i]);
                 cost_prev = Cost(y_hat, outputMiniBatches[i]);
MLP.hpp
@@ -20,7 +20,7 @@ class MLP{
         double modelTest(std::vector<double> x);
         void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
         void SGD(double learning_rate, int max_epoch, bool UI = 1);
-        void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1);
+        void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
         double score();
         void save(std::string fileName);
 
ProbitReg.cpp
@@ -32,9 +32,9 @@ namespace MLPP{
     }
 
     void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
         Activation avn;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -64,8 +64,8 @@ namespace MLPP{
 
     void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI){
         Activation avn;
-        Reg regularization;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -95,10 +95,9 @@ namespace MLPP{
 
     void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI){
         // NOTE: ∂y_hat/∂z is sparse
-        LinAlg alg;
         Activation avn;
+        LinAlg alg;
         Reg regularization;
-        Utilities util;
         double cost_prev = 0;
         int epoch = 1;
 
@@ -134,39 +133,38 @@ namespace MLPP{
         forwardPass();
     }
 
-    void ProbitReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){
-        Reg regularization;
+    void ProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
         Activation avn;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
-        int n_miniBatch = n/miniBatch_size;
-
-        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
-        std::vector<std::vector<double>> outputMiniBatches;
-        // Creating the mini-batches
+        int n_mini_batch = n/mini_batch_size;
+        auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
+
+        // Creating the mini-batches
-        for(int i = 0; i < n_miniBatch; i++){
+        for(int i = 0; i < n_mini_batch; i++){
             std::vector<std::vector<double>> currentInputSet;
             std::vector<double> currentOutputSet;
-            for(int j = 0; j < n/n_miniBatch; j++){
-                currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
-                currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
+            for(int j = 0; j < n/n_mini_batch; j++){
+                currentInputSet.push_back(inputSet[n/n_mini_batch * i + j]);
+                currentOutputSet.push_back(outputSet[n/n_mini_batch * i + j]);
             }
             inputMiniBatches.push_back(currentInputSet);
             outputMiniBatches.push_back(currentOutputSet);
         }
 
-        if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
-            for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
-                inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
-                outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
+        if(double(n)/double(n_mini_batch) - int(n/n_mini_batch) != 0){
+            for(int i = 0; i < n - n/n_mini_batch * n_mini_batch; i++){
+                inputMiniBatches[n_mini_batch - 1].push_back(inputSet[n/n_mini_batch * n_mini_batch + i]);
+                outputMiniBatches[n_mini_batch - 1].push_back(outputSet[n/n_mini_batch * n_mini_batch + i]);
             }
         }
 
         while(true){
-            for(int i = 0; i < n_miniBatch; i++){
+            for(int i = 0; i < n_mini_batch; i++){
                 std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
                 std::vector<double> z = propagate(inputMiniBatches[i]);
                 cost_prev = Cost(y_hat, outputMiniBatches[i]);
ProbitReg.hpp
@@ -22,7 +22,7 @@ namespace MLPP {
         void gradientDescent(double learning_rate, int max_epoch = 0, bool UI = 1);
         void MLE(double learning_rate, int max_epoch = 0, bool UI = 1);
         void SGD(double learning_rate, int max_epoch = 0, bool UI = 1);
-        void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1);
+        void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
         double score();
         void save(std::string fileName);
     private:
SoftmaxNet.cpp
@@ -36,11 +36,9 @@ namespace MLPP{
     }
 
     void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
-        LinAlg alg;
         Activation avn;
-
-
+        LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -94,13 +92,12 @@ namespace MLPP{
     }
 
     void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
-        LinAlg alg;
         Activation avn;
-        Utilities util;
 
+        LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
         while(true){
             std::random_device rd;
             std::default_random_engine generator(rd());
@@ -146,39 +143,38 @@ namespace MLPP{
         forwardPass();
     }
 
-    void SoftmaxNet::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){
-        Reg regularization;
+    void SoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
         Activation avn;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
-        int n_miniBatch = n/miniBatch_size;
-
-        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
-        std::vector<std::vector<std::vector<double>>> outputMiniBatches;
+        // Creating the mini-batches
+        int n_mini_batch = n/mini_batch_size;
+        auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
 
-        //Creating the mini-batches
-        for(int i = 0; i < n_miniBatch; i++){
+        // Creating the mini-batches
+        for(int i = 0; i < n_mini_batch; i++){
             std::vector<std::vector<double>> currentInputSet;
             std::vector<std::vector<double>> currentOutputSet;
-            for(int j = 0; j < n/n_miniBatch; j++){
-                currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
-                currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
+            for(int j = 0; j < n/n_mini_batch; j++){
+                currentInputSet.push_back(inputSet[n/n_mini_batch * i + j]);
+                currentOutputSet.push_back(outputSet[n/n_mini_batch * i + j]);
             }
             inputMiniBatches.push_back(currentInputSet);
            outputMiniBatches.push_back(currentOutputSet);
         }
 
-        if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
-            for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
-                inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
-                outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
+        if(double(n)/double(n_mini_batch) - int(n/n_mini_batch) != 0){
+            for(int i = 0; i < n - n/n_mini_batch * n_mini_batch; i++){
+                inputMiniBatches[n_mini_batch - 1].push_back(inputSet[n/n_mini_batch * n_mini_batch + i]);
+                outputMiniBatches[n_mini_batch - 1].push_back(outputSet[n/n_mini_batch * n_mini_batch + i]);
             }
         }
 
         while(true){
-            for(int i = 0; i < n_miniBatch; i++){
+            for(int i = 0; i < n_mini_batch; i++){
                 std::vector<std::vector<double>> y_hat = Evaluate(inputMiniBatches[i]);
                 auto [z2, a2] = propagate(inputMiniBatches[i]);
                 cost_prev = Cost(y_hat, outputMiniBatches[i]);
SoftmaxNet.hpp
@@ -21,7 +21,7 @@ namespace MLPP {
         std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X);
         void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
         void SGD(double learning_rate, int max_epoch, bool UI = 1);
-        void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1);
+        void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
         double score();
         void save(std::string fileName);
SoftmaxReg.cpp
@@ -33,8 +33,8 @@ namespace MLPP{
     }
 
     void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -72,9 +72,8 @@ namespace MLPP{
     }
 
     void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
         LinAlg alg;
-        Utilities util;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
@@ -114,38 +113,18 @@ namespace MLPP{
 
     }
 
-    void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){
-        Reg regularization;
+    void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
-        int n_miniBatch = n/miniBatch_size;
-
-        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
-        std::vector<std::vector<std::vector<double>>> outputMiniBatches;
-
-        // Creating the mini-batches
-        for(int i = 0; i < n_miniBatch; i++){
-            std::vector<std::vector<double>> currentInputSet;
-            std::vector<std::vector<double>> currentOutputSet;
-            for(int j = 0; j < n/n_miniBatch; j++){
-                currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
-                currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
-            }
-            inputMiniBatches.push_back(currentInputSet);
-            outputMiniBatches.push_back(currentOutputSet);
-        }
-
-        if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
-            for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
-                inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
-                outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
-            }
-        }
+        int n_mini_batch = n/mini_batch_size;
+        auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
 
         while(true){
-            for(int i = 0; i < n_miniBatch; i++){
+            for(int i = 0; i < n_mini_batch; i++){
                 std::vector<std::vector<double>> y_hat = Evaluate(inputMiniBatches[i]);
                 cost_prev = Cost(y_hat, outputMiniBatches[i]);
 
SoftmaxReg.hpp
@@ -21,7 +21,7 @@ namespace MLPP {
         std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X);
         void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
         void SGD(double learning_rate, int max_epoch, bool UI = 1);
-        void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1);
+        void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
         double score();
         void save(std::string fileName);
     private:
TanhReg.cpp
@@ -32,9 +32,9 @@ namespace MLPP{
     }
 
    void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI){
-        Reg regularization;
-        LinAlg alg;
         Activation avn;
+        LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
         forwardPass();
@@ -68,7 +68,6 @@ namespace MLPP{
     void TanhReg::SGD(double learning_rate, int max_epoch, bool UI){
         LinAlg alg;
         Reg regularization;
-        Utilities util;
         double cost_prev = 0;
         int epoch = 1;
 
@@ -103,39 +102,19 @@ namespace MLPP{
         forwardPass();
     }
 
-    void TanhReg::MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI){
-        Reg regularization;
+    void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
         Activation avn;
         LinAlg alg;
+        Reg regularization;
         double cost_prev = 0;
         int epoch = 1;
 
-        int n_miniBatch = n/miniBatch_size;
-
-        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
-        std::vector<std::vector<double>> outputMiniBatches;
-        // Creating the mini-batches
-        for(int i = 0; i < n_miniBatch; i++){
-            std::vector<std::vector<double>> currentInputSet;
-            std::vector<double> currentOutputSet;
-            std::vector<double> currentPreActivationSet;
-            for(int j = 0; j < n/n_miniBatch; j++){
-                currentInputSet.push_back(inputSet[n/n_miniBatch * i + j]);
-                currentOutputSet.push_back(outputSet[n/n_miniBatch * i + j]);
-            }
-            inputMiniBatches.push_back(currentInputSet);
-            outputMiniBatches.push_back(currentOutputSet);
-        }
+        int n_mini_batch = n/mini_batch_size;
+        auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
 
-        if(double(n)/double(n_miniBatch) - int(n/n_miniBatch) != 0){
-            for(int i = 0; i < n - n/n_miniBatch * n_miniBatch; i++){
-                inputMiniBatches[n_miniBatch - 1].push_back(inputSet[n/n_miniBatch * n_miniBatch + i]);
-                outputMiniBatches[n_miniBatch - 1].push_back(outputSet[n/n_miniBatch * n_miniBatch + i]);
-            }
-        }
 
         while(true){
-            for(int i = 0; i < n_miniBatch; i++){
+            for(int i = 0; i < n_mini_batch; i++){
                 std::vector<double> y_hat = Evaluate(inputMiniBatches[i]);
                 std::vector<double> z = propagate(inputMiniBatches[i]);
                 cost_prev = Cost(y_hat, outputMiniBatches[i]);
TanhReg.hpp
@@ -21,7 +21,7 @@ namespace MLPP {
         double modelTest(std::vector<double> x);
         void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
         void SGD(double learning_rate, int max_epoch, bool UI = 1);
-        void MBGD(double learning_rate, int max_epoch, int miniBatch_size, bool UI = 1);
+        void MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
         double score();
         void save(std::string fileName);
     private:
Utilities.cpp
@@ -264,6 +264,80 @@ namespace MLPP{
             std::cout << Cost << std::endl;
         }
 
+    std::vector<std::vector<std::vector<double>>> Utilities::createMiniBatches(std::vector<std::vector<double>> inputSet, int n_mini_batch){
+        int n = inputSet.size();
+
+        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
+
+        // Creating the mini-batches
+        for(int i = 0; i < n_mini_batch; i++){
+            std::vector<std::vector<double>> currentInputSet;
+            for(int j = 0; j < n/n_mini_batch; j++){
+                currentInputSet.push_back(inputSet[n/n_mini_batch * i + j]);
+            }
+            inputMiniBatches.push_back(currentInputSet);
+        }
+
+        if(double(n)/double(n_mini_batch) - int(n/n_mini_batch) != 0){
+            for(int i = 0; i < n - n/n_mini_batch * n_mini_batch; i++){
+                inputMiniBatches[n_mini_batch - 1].push_back(inputSet[n/n_mini_batch * n_mini_batch + i]);
+            }
+        }
+        return inputMiniBatches;
+    }
+
+    std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<double>>> Utilities::createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_mini_batch){
+        int n = inputSet.size();
+
+        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
+        std::vector<std::vector<double>> outputMiniBatches;
+
+        for(int i = 0; i < n_mini_batch; i++){
+            std::vector<std::vector<double>> currentInputSet;
+            std::vector<double> currentOutputSet;
+            for(int j = 0; j < n/n_mini_batch; j++){
+                currentInputSet.push_back(inputSet[n/n_mini_batch * i + j]);
+                currentOutputSet.push_back(outputSet[n/n_mini_batch * i + j]);
+            }
+            inputMiniBatches.push_back(currentInputSet);
+            outputMiniBatches.push_back(currentOutputSet);
+        }
+
+        if(double(n)/double(n_mini_batch) - int(n/n_mini_batch) != 0){
+            for(int i = 0; i < n - n/n_mini_batch * n_mini_batch; i++){
+                inputMiniBatches[n_mini_batch - 1].push_back(inputSet[n/n_mini_batch * n_mini_batch + i]);
+                outputMiniBatches[n_mini_batch - 1].push_back(outputSet[n/n_mini_batch * n_mini_batch + i]);
+            }
+        }
+        return {inputMiniBatches, outputMiniBatches};
+    }
+
+    std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<std::vector<double>>>> Utilities::createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_mini_batch){
+        int n = inputSet.size();
+
+        std::vector<std::vector<std::vector<double>>> inputMiniBatches;
+        std::vector<std::vector<std::vector<double>>> outputMiniBatches;
+
+        for(int i = 0; i < n_mini_batch; i++){
+            std::vector<std::vector<double>> currentInputSet;
+            std::vector<std::vector<double>> currentOutputSet;
+            for(int j = 0; j < n/n_mini_batch; j++){
+                currentInputSet.push_back(inputSet[n/n_mini_batch * i + j]);
+                currentOutputSet.push_back(outputSet[n/n_mini_batch * i + j]);
+            }
+            inputMiniBatches.push_back(currentInputSet);
+            outputMiniBatches.push_back(currentOutputSet);
+        }
+
+        if(double(n)/double(n_mini_batch) - int(n/n_mini_batch) != 0){
+            for(int i = 0; i < n - n/n_mini_batch * n_mini_batch; i++){
+                inputMiniBatches[n_mini_batch - 1].push_back(inputSet[n/n_mini_batch * n_mini_batch + i]);
+                outputMiniBatches[n_mini_batch - 1].push_back(outputSet[n/n_mini_batch * n_mini_batch + i]);
+            }
+        }
+        return {inputMiniBatches, outputMiniBatches};
+    }
+
     std::tuple<double, double, double, double> Utilities::TF_PN(std::vector<double> y_hat, std::vector<double> y){
         double TP, FP, TN, FN = 0;
         for(int i = 0; i < y_hat.size(); i++){
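A hedged usage sketch of the supervised overload added above (the include path and data here are illustrative assumptions; only the call signature comes from this commit):

// Illustrative usage; adjust the include path to your checkout.
#include "MLPP/Utilities/Utilities.hpp"

#include <iostream>
#include <vector>

int main(){
    std::vector<std::vector<double>> X(10, std::vector<double>{1.0, 2.0}); // 10 samples
    std::vector<double> y(10, 0.0);

    // The argument is the number of batches, not the batch size;
    // callers in this commit derive it as n_mini_batch = n/mini_batch_size.
    int n_mini_batch = 10/4; // mini_batch_size = 4 -> 2 batches of 5 samples
    auto [X_batches, y_batches] = MLPP::Utilities::createMiniBatches(X, y, n_mini_batch);
    std::cout << X_batches.size() << " batches, first has " << X_batches[0].size() << " samples" << std::endl;
    return 0;
}

With n = 10 and n_mini_batch = 3 instead, the remainder branch fires and the batch sizes come out as 3, 3, 4.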
Utilities.hpp
@@ -36,6 +36,10 @@ namespace MLPP{
         static void UI(std::vector<std::vector<double>>, std::vector<double> bias);
         static void CostInfo(int epoch, double cost_prev, double Cost);
 
+        static std::vector<std::vector<std::vector<double>>> createMiniBatches(std::vector<std::vector<double>> inputSet, int n_mini_batch);
+        static std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<double>>> createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_mini_batch);
+        static std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<std::vector<double>>>> createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_mini_batch);
+
         // F1 score, Precision/Recall, TP, FP, TN, FN, etc.
         std::tuple<double, double, double, double> TF_PN(std::vector<double> y_hat, std::vector<double> y); //TF_PN = "True", "False", "Positive", "Negative"
         double recall(std::vector<double> y_hat, std::vector<double> y);
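The three overloads are distinguished purely by the label container: none (unsupervised callers such as AutoEncoder), std::vector<double> (scalar-output models such as LinReg and LogReg), and std::vector<std::vector<double>> (vector-output models such as SoftmaxNet and SoftmaxReg). The two supervised overloads return their input and output batches as a std::tuple, which is why the call sites above unpack them with a structured binding (auto [inputMiniBatches, outputMiniBatches] = ...).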
Binary file not shown.
main.cpp
@@ -135,7 +135,7 @@ int main() {
     // std::vector<std::vector<double>> inputSet = {{1,2,3,4,5,6,7,8,9,10}, {3,5,9,12,15,18,21,24,27,30}};
     // std::vector<double> outputSet = {2,4,6,8,10,12,14,16,18,20};
     // LinReg model(alg.transpose(inputSet), outputSet); // Can use Lasso, Ridge, ElasticNet Reg
-    //model.normalEquation();
+    // model.normalEquation();
     // model.gradientDescent(0.001, 30000, 1);
     // model.SGD(0.001, 30000, 1);
     // model.MBGD(0.001, 10000, 2, 1);
@@ -368,10 +368,10 @@ int main() {
     // alg.printMatrix(alg.matrixPower({{5,5},{5,5}}, 2));
     // alg.printVector(alg.solve({{1,1}, {1.5, 4.0}}, {2200, 5050}));
 
-    std::vector<std::vector<double>> matrixOfCubes = {{1,2,64,27}};
-    std::vector<double> vectorOfCubes = {1,2,64,27};
-    alg.printMatrix(alg.cbrt(matrixOfCubes));
-    alg.printVector(alg.cbrt(vectorOfCubes));
+    // std::vector<std::vector<double>> matrixOfCubes = {{1,2,64,27}};
+    // std::vector<double> vectorOfCubes = {1,2,64,27};
+    // alg.printMatrix(alg.cbrt(matrixOfCubes));
+    // alg.printVector(alg.cbrt(vectorOfCubes));
 
     return 0;
 }