Prefix Cost with MLPP.
parent 1381b5f70e
commit 0e9d8bcb41

main.cpp: 4 changes
@@ -123,9 +123,9 @@ int main() {
 	Stat stat;
 	LinAlg alg;
 	MLPPActivation avn;
-	Cost cost;
+	MLPPCost cost;
 	Data data;
-	Convolutions conv;
+	MLPPConvolutions conv;
 
 	// DATA SETS
 	// std::vector<std::vector<double>> inputSet = {{1,2,3,4,5,6,7,8,9,10}, {3,5,9,12,15,18,21,24,27,30}};
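Reviewer note: the commit is a pure rename, Cost becomes MLPPCost and Convolutions becomes MLPPConvolutions; signatures and bodies are untouched. For downstream code the migration is mechanical. A hypothetical caller sketch (the include names here are assumptions, not part of the diff):

// Hypothetical caller; header names are assumed, not from the diff.
#include "cost.h"         // declares MLPPCost after this commit
#include "convolutions.h" // declares MLPPConvolutions after this commit

#include <vector>

int main() {
	MLPPCost cost;         // was: Cost cost;
	MLPPConvolutions conv; // was: Convolutions conv;

	std::vector<double> y_hat = { 0.9, 0.1 };
	std::vector<double> y = { 1.0, 0.0 };
	double err = cost.MSE(y_hat, y); // method names are unchanged
	(void)err;
	(void)conv;
	return 0;
}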
@@ -54,7 +54,7 @@ double MLPPANN::modelTest(std::vector<double> x) {
 }
 
 void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -88,7 +88,7 @@ void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 }
 
 void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 
 	double cost_prev = 0;
@@ -126,7 +126,7 @@ void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) {
 }
 
 void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 
 	double cost_prev = 0;
@@ -164,7 +164,7 @@ void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo
 }
 
 void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 
 	double cost_prev = 0;
@@ -221,7 +221,7 @@ void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size,
 }
 
 void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 
 	double cost_prev = 0;
@@ -277,7 +277,7 @@ void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size,
 }
 
 void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 
 	double cost_prev = 0;
@@ -333,7 +333,7 @@ void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size,
 }
 
 void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 
 	double cost_prev = 0;
@@ -400,7 +400,7 @@ void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, dou
 }
 
 void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 
 	double cost_prev = 0;
@@ -465,7 +465,7 @@ void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, d
 }
 
 void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 
 	double cost_prev = 0;
@@ -535,7 +535,7 @@ void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, do
 }
 
 void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 
 	double cost_prev = 0;
@@ -671,7 +671,7 @@ void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::stri
 
 double MLPPANN::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	double totalRegTerm = 0;
 
 	auto cost_function = outputLayer->cost_map[outputLayer->cost];
@@ -719,7 +719,7 @@ void MLPPANN::updateParameters(std::vector<std::vector<std::vector<double>>> hid
 
 std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> MLPPANN::computeGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
 	// std::cout << "BEGIN" << std::endl;
-	class Cost cost;
+	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
 	Reg regularization;
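A note on the recurring `class Cost cost;` pattern this diff touches: these classes also declare a member function named Cost (see MLPPANN::Cost above), so inside member scope the bare name Cost refers to the function, not the type. The elaborated type specifier `class` forces lookup to find the class. After the rename, `class MLPPCost cost;` no longer strictly needs the keyword, but the commit keeps it. A self-contained sketch of the mechanism (names illustrative):

#include <vector>

class Cost {
public:
	double MSE(std::vector<double> y_hat, std::vector<double> y) {
		double sum = 0;
		for (size_t i = 0; i < y_hat.size(); i++) {
			sum += (y_hat[i] - y[i]) * (y_hat[i] - y[i]);
		}
		return sum / (2 * y_hat.size());
	}
};

class Model {
public:
	double Cost(std::vector<double> y_hat, std::vector<double> y) {
		// "Cost cost;" would not compile here: in this scope, Cost names
		// the member function. "class Cost" forces type lookup instead.
		class Cost cost;
		return cost.MSE(y_hat, y);
	}
};

int main() {
	Model m;
	return m.Cost({ 1.0 }, { 1.0 }) == 0 ? 0 : 1;
}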
@@ -208,7 +208,7 @@ void MLPPAutoEncoder::save(std::string fileName) {
 }
 
 double MLPPAutoEncoder::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.MSE(y_hat, inputSet);
 }
 
@@ -186,7 +186,7 @@ double MLPPCLogLogReg::score() {
 
 double MLPPCLogLogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
 
@@ -10,11 +10,11 @@
 #include <cmath>
 #include <iostream>
 
-Convolutions::Convolutions() :
+MLPPConvolutions::MLPPConvolutions() :
 		prewittHorizontal({ { 1, 1, 1 }, { 0, 0, 0 }, { -1, -1, -1 } }), prewittVertical({ { 1, 0, -1 }, { 1, 0, -1 }, { 1, 0, -1 } }), sobelHorizontal({ { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }), sobelVertical({ { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }), scharrHorizontal({ { 3, 10, 3 }, { 0, 0, 0 }, { -3, -10, -3 } }), scharrVertical({ { 3, 0, -3 }, { 10, 0, -10 }, { 3, 0, -3 } }), robertsHorizontal({ { 0, 1 }, { -1, 0 } }), robertsVertical({ { 1, 0 }, { 0, -1 } }) {
 }
 
-std::vector<std::vector<double>> Convolutions::convolve(std::vector<std::vector<double>> input, std::vector<std::vector<double>> filter, int S, int P) {
+std::vector<std::vector<double>> MLPPConvolutions::convolve(std::vector<std::vector<double>> input, std::vector<std::vector<double>> filter, int S, int P) {
 	LinAlg alg;
 	std::vector<std::vector<double>> featureMap;
 	int N = input.size();
@@ -70,7 +70,7 @@ std::vector<std::vector<double>> Convolutions::convolve(std::vector<std::vector<
 	return featureMap;
 }
 
-std::vector<std::vector<std::vector<double>>> Convolutions::convolve(std::vector<std::vector<std::vector<double>>> input, std::vector<std::vector<std::vector<double>>> filter, int S, int P) {
+std::vector<std::vector<std::vector<double>>> MLPPConvolutions::convolve(std::vector<std::vector<std::vector<double>>> input, std::vector<std::vector<std::vector<double>>> filter, int S, int P) {
 	LinAlg alg;
 	std::vector<std::vector<std::vector<double>>> featureMap;
 	int N = input[0].size();
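For orientation while reading the convolve hunks: S is the stride and P the zero-padding, so for an N-by-N input and an F-by-F filter the feature map comes out (N - F + 2P) / S + 1 on a side. A small arithmetic sketch (the function name is illustrative, not from the library):

#include <cstdio>

// Side length of a square convolution output: (N - F + 2P) / S + 1.
int convOutputSize(int N, int F, int S, int P) {
	return (N - F + 2 * P) / S + 1;
}

int main() {
	// e.g. a 28x28 input with a 3x3 filter, stride 1, no padding -> 26.
	std::printf("%d\n", convOutputSize(28, 3, 1, 0));
	return 0;
}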
@@ -136,7 +136,7 @@ std::vector<std::vector<std::vector<double>>> Convolutions::convolve(std::vector
 	return featureMap;
 }
 
-std::vector<std::vector<double>> Convolutions::pool(std::vector<std::vector<double>> input, int F, int S, std::string type) {
+std::vector<std::vector<double>> MLPPConvolutions::pool(std::vector<std::vector<double>> input, int F, int S, std::string type) {
 	LinAlg alg;
 	std::vector<std::vector<double>> pooledMap;
 	int N = input.size();
@@ -176,7 +176,7 @@ std::vector<std::vector<double>> Convolutions::pool(std::vector<std::vector<doub
 	return pooledMap;
 }
 
-std::vector<std::vector<std::vector<double>>> Convolutions::pool(std::vector<std::vector<std::vector<double>>> input, int F, int S, std::string type) {
+std::vector<std::vector<std::vector<double>>> MLPPConvolutions::pool(std::vector<std::vector<std::vector<double>>> input, int F, int S, std::string type) {
 	std::vector<std::vector<std::vector<double>>> pooledMap;
 	for (int i = 0; i < input.size(); i++) {
 		pooledMap.push_back(pool(input[i], F, S, type));
@@ -184,7 +184,7 @@ std::vector<std::vector<std::vector<double>>> Convolutions::pool(std::vector<std
 	return pooledMap;
 }
 
-double Convolutions::globalPool(std::vector<std::vector<double>> input, std::string type) {
+double MLPPConvolutions::globalPool(std::vector<std::vector<double>> input, std::string type) {
 	LinAlg alg;
 	if (type == "Average") {
 		Stat stat;
@@ -196,7 +196,7 @@ double Convolutions::globalPool(std::vector<std::vector<double>> input, std::str
 	}
 }
 
-std::vector<double> Convolutions::globalPool(std::vector<std::vector<std::vector<double>>> input, std::string type) {
+std::vector<double> MLPPConvolutions::globalPool(std::vector<std::vector<std::vector<double>>> input, std::string type) {
 	std::vector<double> pooledMap;
 	for (int i = 0; i < input.size(); i++) {
 		pooledMap.push_back(globalPool(input[i], type));
@@ -204,12 +204,12 @@ std::vector<double> Convolutions::globalPool(std::vector<std::vector<std::vector
 	return pooledMap;
 }
 
-double Convolutions::gaussian2D(double x, double y, double std) {
+double MLPPConvolutions::gaussian2D(double x, double y, double std) {
 	double std_sq = std * std;
 	return 1 / (2 * M_PI * std_sq) * std::exp(-(x * x + y * y) / 2 * std_sq);
 }
 
-std::vector<std::vector<double>> Convolutions::gaussianFilter2D(int size, double std) {
+std::vector<std::vector<double>> MLPPConvolutions::gaussianFilter2D(int size, double std) {
 	std::vector<std::vector<double>> filter;
 	filter.resize(size);
 	for (int i = 0; i < filter.size(); i++) {
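One pre-existing wrinkle worth flagging as it scrolls past: in gaussian2D, the expression `-(x * x + y * y) / 2 * std_sq` parses left to right as `(-(x² + y²) / 2) * σ²`, i.e. it multiplies by the variance instead of dividing by 2σ². The rename commit carries the line over unchanged. A standalone sketch of the textbook kernel with the parentheses in place:

#define _USE_MATH_DEFINES
#include <cmath>

// 2D Gaussian with exponent -(x^2 + y^2) / (2 * sigma^2).
double gaussian2D(double x, double y, double sigma) {
	double sigma_sq = sigma * sigma;
	return 1.0 / (2.0 * M_PI * sigma_sq) * std::exp(-(x * x + y * y) / (2.0 * sigma_sq));
}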
@@ -229,7 +229,7 @@ been easier to carry out the calculation explicitly, mainly because it is more i
 and also because my convolution algorithm is only built for filters with equally sized
 heights and widths.
 */
-std::vector<std::vector<double>> Convolutions::dx(std::vector<std::vector<double>> input) {
+std::vector<std::vector<double>> MLPPConvolutions::dx(std::vector<std::vector<double>> input) {
 	std::vector<std::vector<double>> deriv; // We assume a gray scale image.
 	deriv.resize(input.size());
 	for (int i = 0; i < deriv.size(); i++) {
@@ -250,7 +250,7 @@ std::vector<std::vector<double>> Convolutions::dx(std::vector<std::vector<double
 	return deriv;
 }
 
-std::vector<std::vector<double>> Convolutions::dy(std::vector<std::vector<double>> input) {
+std::vector<std::vector<double>> MLPPConvolutions::dy(std::vector<std::vector<double>> input) {
 	std::vector<std::vector<double>> deriv;
 	deriv.resize(input.size());
 	for (int i = 0; i < deriv.size(); i++) {
@@ -271,14 +271,14 @@ std::vector<std::vector<double>> Convolutions::dy(std::vector<std::vector<double
 	return deriv;
 }
 
-std::vector<std::vector<double>> Convolutions::gradMagnitude(std::vector<std::vector<double>> input) {
+std::vector<std::vector<double>> MLPPConvolutions::gradMagnitude(std::vector<std::vector<double>> input) {
 	LinAlg alg;
 	std::vector<std::vector<double>> xDeriv_2 = alg.hadamard_product(dx(input), dx(input));
 	std::vector<std::vector<double>> yDeriv_2 = alg.hadamard_product(dy(input), dy(input));
 	return alg.sqrt(alg.addition(xDeriv_2, yDeriv_2));
 }
 
-std::vector<std::vector<double>> Convolutions::gradOrientation(std::vector<std::vector<double>> input) {
+std::vector<std::vector<double>> MLPPConvolutions::gradOrientation(std::vector<std::vector<double>> input) {
 	std::vector<std::vector<double>> deriv;
 	deriv.resize(input.size());
 	for (int i = 0; i < deriv.size(); i++) {
@@ -295,7 +295,7 @@ std::vector<std::vector<double>> Convolutions::gradOrientation(std::vector<std::
 	return deriv;
 }
 
-std::vector<std::vector<std::vector<double>>> Convolutions::computeM(std::vector<std::vector<double>> input) {
+std::vector<std::vector<std::vector<double>>> MLPPConvolutions::computeM(std::vector<std::vector<double>> input) {
 	double const SIGMA = 1;
 	double const GAUSSIAN_SIZE = 3;
 
@@ -313,7 +313,7 @@ std::vector<std::vector<std::vector<double>>> Convolutions::computeM(std::vector
 	std::vector<std::vector<std::vector<double>>> M = { xxDeriv, yyDeriv, xyDeriv };
 	return M;
 }
-std::vector<std::vector<std::string>> Convolutions::harrisCornerDetection(std::vector<std::vector<double>> input) {
+std::vector<std::vector<std::string>> MLPPConvolutions::harrisCornerDetection(std::vector<std::vector<double>> input) {
 	double const k = 0.05; // Empirically determined wherein k -> [0.04, 0.06], though conventionally 0.05 is typically used as well.
 	LinAlg alg;
 	std::vector<std::vector<std::vector<double>>> M = computeM(input);
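Context for the computeM/harrisCornerDetection pair: M packs the smoothed products of image derivatives as { xxDeriv, yyDeriv, xyDeriv }, and the Harris response at each pixel is det(M) - k·trace(M)² with k = 0.05, per the diff's own comment. A per-pixel sketch of the standard classification rule (the truncated hunk hides the exact thresholding, so the cutoffs here are illustrative):

#include <string>

// Harris response for one pixel, given the diff's M = { xxDeriv, yyDeriv, xyDeriv }:
// R = det(M) - k * trace(M)^2. Thresholds are illustrative, not from the source.
std::string harrisClassify(double xx, double yy, double xy, double k = 0.05) {
	double det = xx * yy - xy * xy;
	double trace = xx + yy;
	double R = det - k * trace * trace;
	if (R > 1e-4) {
		return "Corner"; // both eigenvalues large
	} else if (R < -1e-4) {
		return "Edge"; // one eigenvalue dominates
	}
	return "Flat";
}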
@@ -340,34 +340,34 @@ std::vector<std::vector<std::string>> Convolutions::harrisCornerDetection(std::v
 	return imageTypes;
 }
 
-std::vector<std::vector<double>> Convolutions::getPrewittHorizontal() {
+std::vector<std::vector<double>> MLPPConvolutions::getPrewittHorizontal() {
 	return prewittHorizontal;
 }
 
-std::vector<std::vector<double>> Convolutions::getPrewittVertical() {
+std::vector<std::vector<double>> MLPPConvolutions::getPrewittVertical() {
 	return prewittVertical;
 }
 
-std::vector<std::vector<double>> Convolutions::getSobelHorizontal() {
+std::vector<std::vector<double>> MLPPConvolutions::getSobelHorizontal() {
 	return sobelHorizontal;
 }
 
-std::vector<std::vector<double>> Convolutions::getSobelVertical() {
+std::vector<std::vector<double>> MLPPConvolutions::getSobelVertical() {
 	return sobelVertical;
 }
 
-std::vector<std::vector<double>> Convolutions::getScharrHorizontal() {
+std::vector<std::vector<double>> MLPPConvolutions::getScharrHorizontal() {
 	return scharrHorizontal;
 }
 
-std::vector<std::vector<double>> Convolutions::getScharrVertical() {
+std::vector<std::vector<double>> MLPPConvolutions::getScharrVertical() {
 	return scharrVertical;
 }
 
-std::vector<std::vector<double>> Convolutions::getRobertsHorizontal() {
+std::vector<std::vector<double>> MLPPConvolutions::getRobertsHorizontal() {
 	return robertsHorizontal;
 }
 
-std::vector<std::vector<double>> Convolutions::getRobertsVertical() {
+std::vector<std::vector<double>> MLPPConvolutions::getRobertsVertical() {
 	return robertsVertical;
 }
@@ -5,9 +5,9 @@
 #include <vector>
 #include <string>
 
-class Convolutions {
+class MLPPConvolutions {
 public:
-	Convolutions();
+	MLPPConvolutions();
 	std::vector<std::vector<double>> convolve(std::vector<std::vector<double>> input, std::vector<std::vector<double>> filter, int S, int P = 0);
 	std::vector<std::vector<std::vector<double>>> convolve(std::vector<std::vector<std::vector<double>>> input, std::vector<std::vector<std::vector<double>>> filter, int S, int P = 0);
 	std::vector<std::vector<double>> pool(std::vector<std::vector<double>> input, int F, int S, std::string type);
@@ -11,7 +11,7 @@
 #include <iostream>
 
 
-double Cost::MSE(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPCost::MSE(std::vector<double> y_hat, std::vector<double> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		sum += (y_hat[i] - y[i]) * (y_hat[i] - y[i]);
@@ -19,7 +19,7 @@ double Cost::MSE(std::vector<double> y_hat, std::vector<double> y) {
 	return sum / 2 * y_hat.size();
 }
 
-double Cost::MSE(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+double MLPPCost::MSE(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		for (int j = 0; j < y_hat[i].size(); j++) {
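A precedence note on the context line `return sum / 2 * y_hat.size();`: C++ evaluates this left to right as `(sum / 2) * n`, which scales the result by n² relative to the usual mean squared error `sum / (2n)`. The commit only renames the class and leaves the expression as it was. A standalone sketch of the conventional form:

#include <vector>

// Mean squared error with the usual 1/(2n) normalization.
double mse(const std::vector<double> &y_hat, const std::vector<double> &y) {
	double sum = 0;
	for (size_t i = 0; i < y_hat.size(); i++) {
		double d = y_hat[i] - y[i];
		sum += d * d;
	}
	return sum / (2 * y_hat.size());
}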
@@ -29,17 +29,17 @@ double Cost::MSE(std::vector<std::vector<double>> y_hat, std::vector<std::vector
 	return sum / 2 * y_hat.size();
 }
 
-std::vector<double> Cost::MSEDeriv(std::vector<double> y_hat, std::vector<double> y) {
+std::vector<double> MLPPCost::MSEDeriv(std::vector<double> y_hat, std::vector<double> y) {
 	LinAlg alg;
 	return alg.subtraction(y_hat, y);
 }
 
-std::vector<std::vector<double>> Cost::MSEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+std::vector<std::vector<double>> MLPPCost::MSEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	LinAlg alg;
 	return alg.subtraction(y_hat, y);
 }
 
-double Cost::RMSE(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPCost::RMSE(std::vector<double> y_hat, std::vector<double> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		sum += (y_hat[i] - y[i]) * (y_hat[i] - y[i]);
@@ -47,7 +47,7 @@ double Cost::RMSE(std::vector<double> y_hat, std::vector<double> y) {
 	return sqrt(sum / y_hat.size());
 }
 
-double Cost::RMSE(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+double MLPPCost::RMSE(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		for (int j = 0; j < y_hat[i].size(); j++) {
@@ -57,17 +57,17 @@ double Cost::RMSE(std::vector<std::vector<double>> y_hat, std::vector<std::vecto
 	return sqrt(sum / y_hat.size());
 }
 
-std::vector<double> Cost::RMSEDeriv(std::vector<double> y_hat, std::vector<double> y) {
+std::vector<double> MLPPCost::RMSEDeriv(std::vector<double> y_hat, std::vector<double> y) {
 	LinAlg alg;
 	return alg.scalarMultiply(1 / (2 * sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y));
 }
 
-std::vector<std::vector<double>> Cost::RMSEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+std::vector<std::vector<double>> MLPPCost::RMSEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	LinAlg alg;
 	return alg.scalarMultiply(1 / (2 / sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y));
 }
 
-double Cost::MAE(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPCost::MAE(std::vector<double> y_hat, std::vector<double> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		sum += abs((y_hat[i] - y[i]));
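The two RMSEDeriv overloads just above differ by one character: the vector form scales by `1 / (2 * sqrt(MSE))`, the matrix form by `1 / (2 / sqrt(MSE))`. The chain rule for RMSE = √MSE gives the former, so the matrix overload inverts the factor; again pre-existing and untouched by the rename. The intended scale factor, as a one-liner:

#include <cmath>

// d(RMSE)/dy_hat = d(MSE)/dy_hat * 1 / (2 * sqrt(MSE)); this is the scalar factor.
double rmseDerivScale(double mse_value) {
	return 1.0 / (2.0 * std::sqrt(mse_value));
}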
@@ -75,7 +75,7 @@ double Cost::MAE(std::vector<double> y_hat, std::vector<double> y) {
 	return sum / y_hat.size();
 }
 
-double Cost::MAE(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+double MLPPCost::MAE(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		for (int j = 0; j < y_hat[i].size(); j++) {
@@ -85,7 +85,7 @@ double Cost::MAE(std::vector<std::vector<double>> y_hat, std::vector<std::vector
 	return sum / y_hat.size();
 }
 
-std::vector<double> Cost::MAEDeriv(std::vector<double> y_hat, std::vector<double> y) {
+std::vector<double> MLPPCost::MAEDeriv(std::vector<double> y_hat, std::vector<double> y) {
 	std::vector<double> deriv;
 	deriv.resize(y_hat.size());
 	for (int i = 0; i < deriv.size(); i++) {
@@ -100,7 +100,7 @@ std::vector<double> Cost::MAEDeriv(std::vector<double> y_hat, std::vector<double
 	return deriv;
 }
 
-std::vector<std::vector<double>> Cost::MAEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+std::vector<std::vector<double>> MLPPCost::MAEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	std::vector<std::vector<double>> deriv;
 	deriv.resize(y_hat.size());
 	for (int i = 0; i < deriv.size(); i++) {
@@ -120,7 +120,7 @@ std::vector<std::vector<double>> Cost::MAEDeriv(std::vector<std::vector<double>>
 	return deriv;
 }
 
-double Cost::MBE(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPCost::MBE(std::vector<double> y_hat, std::vector<double> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		sum += (y_hat[i] - y[i]);
@@ -128,7 +128,7 @@ double Cost::MBE(std::vector<double> y_hat, std::vector<double> y) {
 	return sum / y_hat.size();
 }
 
-double Cost::MBE(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+double MLPPCost::MBE(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		for (int j = 0; j < y_hat[i].size(); j++) {
@@ -138,17 +138,17 @@ double Cost::MBE(std::vector<std::vector<double>> y_hat, std::vector<std::vector
 	return sum / y_hat.size();
 }
 
-std::vector<double> Cost::MBEDeriv(std::vector<double> y_hat, std::vector<double> y) {
+std::vector<double> MLPPCost::MBEDeriv(std::vector<double> y_hat, std::vector<double> y) {
 	LinAlg alg;
 	return alg.onevec(y_hat.size());
 }
 
-std::vector<std::vector<double>> Cost::MBEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+std::vector<std::vector<double>> MLPPCost::MBEDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	LinAlg alg;
 	return alg.onemat(y_hat.size(), y_hat[0].size());
 }
 
-double Cost::LogLoss(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPCost::LogLoss(std::vector<double> y_hat, std::vector<double> y) {
 	double sum = 0;
 	double eps = 1e-8;
 	for (int i = 0; i < y_hat.size(); i++) {
@@ -158,7 +158,7 @@ double Cost::LogLoss(std::vector<double> y_hat, std::vector<double> y) {
 	return sum / y_hat.size();
 }
 
-double Cost::LogLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+double MLPPCost::LogLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	double sum = 0;
 	double eps = 1e-8;
 	for (int i = 0; i < y_hat.size(); i++) {
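The LogLoss bodies are mostly cut off by the hunk context, but the visible pieces (the `eps = 1e-8` guard and the `sum / y_hat.size()` return) match the standard batch-averaged binary log loss. A self-contained sketch under that assumption:

#include <cmath>
#include <vector>

// Binary log loss with an epsilon guard, averaged over the batch:
// -(1/n) * sum_i [ y_i*log(yh_i + eps) + (1 - y_i)*log(1 - yh_i + eps) ].
double logLoss(const std::vector<double> &y_hat, const std::vector<double> &y) {
	double sum = 0;
	double eps = 1e-8;
	for (size_t i = 0; i < y_hat.size(); i++) {
		sum += y[i] * std::log(y_hat[i] + eps) + (1 - y[i]) * std::log(1 - y_hat[i] + eps);
	}
	return -sum / y_hat.size();
}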
@@ -170,17 +170,17 @@ double Cost::LogLoss(std::vector<std::vector<double>> y_hat, std::vector<std::ve
 	return sum / y_hat.size();
 }
 
-std::vector<double> Cost::LogLossDeriv(std::vector<double> y_hat, std::vector<double> y) {
+std::vector<double> MLPPCost::LogLossDeriv(std::vector<double> y_hat, std::vector<double> y) {
 	LinAlg alg;
 	return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat))));
 }
 
-std::vector<std::vector<double>> Cost::LogLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+std::vector<std::vector<double>> MLPPCost::LogLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	LinAlg alg;
 	return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat))));
 }
 
-double Cost::CrossEntropy(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPCost::CrossEntropy(std::vector<double> y_hat, std::vector<double> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		sum += y[i] * std::log(y_hat[i]);
@@ -189,7 +189,7 @@ double Cost::CrossEntropy(std::vector<double> y_hat, std::vector<double> y) {
 	return -1 * sum;
 }
 
-double Cost::CrossEntropy(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+double MLPPCost::CrossEntropy(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		for (int j = 0; j < y_hat[i].size(); j++) {
@@ -200,17 +200,17 @@ double Cost::CrossEntropy(std::vector<std::vector<double>> y_hat, std::vector<st
 	return -1 * sum;
 }
 
-std::vector<double> Cost::CrossEntropyDeriv(std::vector<double> y_hat, std::vector<double> y) {
+std::vector<double> MLPPCost::CrossEntropyDeriv(std::vector<double> y_hat, std::vector<double> y) {
 	LinAlg alg;
 	return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat));
 }
 
-std::vector<std::vector<double>> Cost::CrossEntropyDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+std::vector<std::vector<double>> MLPPCost::CrossEntropyDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	LinAlg alg;
 	return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat));
 }
 
-double Cost::HuberLoss(std::vector<double> y_hat, std::vector<double> y, double delta) {
+double MLPPCost::HuberLoss(std::vector<double> y_hat, std::vector<double> y, double delta) {
 	LinAlg alg;
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
@@ -223,7 +223,7 @@ double Cost::HuberLoss(std::vector<double> y_hat, std::vector<double> y, double
 	return sum;
 }
 
-double Cost::HuberLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, double delta) {
+double MLPPCost::HuberLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, double delta) {
 	LinAlg alg;
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
@@ -238,7 +238,7 @@ double Cost::HuberLoss(std::vector<std::vector<double>> y_hat, std::vector<std::
 	return sum;
 }
 
-std::vector<double> Cost::HuberLossDeriv(std::vector<double> y_hat, std::vector<double> y, double delta) {
+std::vector<double> MLPPCost::HuberLossDeriv(std::vector<double> y_hat, std::vector<double> y, double delta) {
 	LinAlg alg;
 	double sum = 0;
 	std::vector<double> deriv;
@@ -258,7 +258,7 @@ std::vector<double> Cost::HuberLossDeriv(std::vector<double> y_hat, std::vector<
 	return deriv;
 }
 
-std::vector<std::vector<double>> Cost::HuberLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, double delta) {
+std::vector<std::vector<double>> MLPPCost::HuberLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, double delta) {
 	LinAlg alg;
 	double sum = 0;
 	std::vector<std::vector<double>> deriv;
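HuberLoss is likewise truncated to its opening lines here; the standard definition it implements is quadratic for |error| ≤ δ and linear beyond. A textbook sketch, not a byte-for-byte copy of cost.cpp:

#include <cmath>
#include <vector>

// Huber loss: 0.5*err^2 inside the delta band, delta*(|err| - 0.5*delta) outside.
double huberLoss(const std::vector<double> &y_hat, const std::vector<double> &y, double delta) {
	double sum = 0;
	for (size_t i = 0; i < y_hat.size(); i++) {
		double err = y[i] - y_hat[i];
		if (std::fabs(err) <= delta) {
			sum += 0.5 * err * err;
		} else {
			sum += delta * (std::fabs(err) - 0.5 * delta);
		}
	}
	return sum;
}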
@@ -283,7 +283,7 @@ std::vector<std::vector<double>> Cost::HuberLossDeriv(std::vector<std::vector<do
 	return deriv;
 }
 
-double Cost::HingeLoss(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPCost::HingeLoss(std::vector<double> y_hat, std::vector<double> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		sum += fmax(0, 1 - y[i] * y_hat[i]);
@@ -292,7 +292,7 @@ double Cost::HingeLoss(std::vector<double> y_hat, std::vector<double> y) {
 	return sum / y_hat.size();
 }
 
-double Cost::HingeLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+double MLPPCost::HingeLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		for (int j = 0; j < y_hat[i].size(); j++) {
@@ -303,7 +303,7 @@ double Cost::HingeLoss(std::vector<std::vector<double>> y_hat, std::vector<std::
 	return sum / y_hat.size();
 }
 
-std::vector<double> Cost::HingeLossDeriv(std::vector<double> y_hat, std::vector<double> y) {
+std::vector<double> MLPPCost::HingeLossDeriv(std::vector<double> y_hat, std::vector<double> y) {
 	std::vector<double> deriv;
 	deriv.resize(y_hat.size());
 	for (int i = 0; i < y_hat.size(); i++) {
@@ -316,7 +316,7 @@ std::vector<double> Cost::HingeLossDeriv(std::vector<double> y_hat, std::vector<
 	return deriv;
 }
 
-std::vector<std::vector<double>> Cost::HingeLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+std::vector<std::vector<double>> MLPPCost::HingeLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	std::vector<std::vector<double>> deriv;
 	for (int i = 0; i < y_hat.size(); i++) {
 		for (int j = 0; j < y_hat[i].size(); j++) {
@@ -330,7 +330,7 @@ std::vector<std::vector<double>> Cost::HingeLossDeriv(std::vector<std::vector<do
 	return deriv;
 }
 
-double Cost::WassersteinLoss(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPCost::WassersteinLoss(std::vector<double> y_hat, std::vector<double> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		sum += y_hat[i] * y[i];
@@ -338,7 +338,7 @@ double Cost::WassersteinLoss(std::vector<double> y_hat, std::vector<double> y) {
 	return -sum / y_hat.size();
 }
 
-double Cost::WassersteinLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+double MLPPCost::WassersteinLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
 		for (int j = 0; j < y_hat[i].size(); j++) {
@@ -348,39 +348,39 @@ double Cost::WassersteinLoss(std::vector<std::vector<double>> y_hat, std::vector
 	return -sum / y_hat.size();
 }
 
-std::vector<double> Cost::WassersteinLossDeriv(std::vector<double> y_hat, std::vector<double> y) {
+std::vector<double> MLPPCost::WassersteinLossDeriv(std::vector<double> y_hat, std::vector<double> y) {
 	LinAlg alg;
 	return alg.scalarMultiply(-1, y); // Simple.
 }
 
-std::vector<std::vector<double>> Cost::WassersteinLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+std::vector<std::vector<double>> MLPPCost::WassersteinLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	LinAlg alg;
 	return alg.scalarMultiply(-1, y); // Simple.
 }
 
-double Cost::HingeLoss(std::vector<double> y_hat, std::vector<double> y, std::vector<double> weights, double C) {
+double MLPPCost::HingeLoss(std::vector<double> y_hat, std::vector<double> y, std::vector<double> weights, double C) {
 	LinAlg alg;
 	Reg regularization;
 	return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge");
 }
-double Cost::HingeLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, std::vector<std::vector<double>> weights, double C) {
+double MLPPCost::HingeLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, std::vector<std::vector<double>> weights, double C) {
 	LinAlg alg;
 	Reg regularization;
 	return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge");
 }
 
-std::vector<double> Cost::HingeLossDeriv(std::vector<double> y_hat, std::vector<double> y, double C) {
+std::vector<double> MLPPCost::HingeLossDeriv(std::vector<double> y_hat, std::vector<double> y, double C) {
 	LinAlg alg;
 	Reg regularization;
 	return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y));
 }
-std::vector<std::vector<double>> Cost::HingeLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, double C) {
+std::vector<std::vector<double>> MLPPCost::HingeLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, double C) {
 	LinAlg alg;
 	Reg regularization;
 	return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y));
 }
 
-double Cost::dualFormSVM(std::vector<double> alpha, std::vector<std::vector<double>> X, std::vector<double> y) {
+double MLPPCost::dualFormSVM(std::vector<double> alpha, std::vector<std::vector<double>> X, std::vector<double> y) {
 	LinAlg alg;
 	std::vector<std::vector<double>> Y = alg.diag(y); // Y is a diagnoal matrix. Y[i][j] = y[i] if i = i, else Y[i][j] = 0. Yt = Y.
 	std::vector<std::vector<double>> K = alg.matmult(X, alg.transpose(X)); // TO DO: DON'T forget to add non-linear kernelizations.
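The dualFormSVM context lines spell out the objective: with Y = diag(y) and a linear kernel K = XXᵀ (the TODO notes non-linear kernels are still missing), the returned value is -1ᵀα + ½·αᵀ(YKY)α. Expanded without the LinAlg helpers, that is:

#include <vector>

// Dual SVM objective with a linear kernel, written out element-wise:
// f(a) = -sum_i a_i + 0.5 * sum_{i,j} a_i a_j y_i y_j <x_i, x_j>.
double dualObjective(const std::vector<double> &alpha,
		const std::vector<std::vector<double>> &X,
		const std::vector<double> &y) {
	double linear = 0, quad = 0;
	for (size_t i = 0; i < alpha.size(); i++) {
		linear += alpha[i];
		for (size_t j = 0; j < alpha.size(); j++) {
			double dot = 0;
			for (size_t k = 0; k < X[i].size(); k++) {
				dot += X[i][k] * X[j][k]; // linear kernel <x_i, x_j>
			}
			quad += alpha[i] * alpha[j] * y[i] * y[j] * dot;
		}
	}
	return -linear + 0.5 * quad;
}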
@@ -391,7 +391,7 @@ double Cost::dualFormSVM(std::vector<double> alpha, std::vector<std::vector<doub
 	return -alg.dot(one, alpha) + 0.5 * alphaQ;
 }
 
-std::vector<double> Cost::dualFormSVMDeriv(std::vector<double> alpha, std::vector<std::vector<double>> X, std::vector<double> y) {
+std::vector<double> MLPPCost::dualFormSVMDeriv(std::vector<double> alpha, std::vector<std::vector<double>> X, std::vector<double> y) {
 	LinAlg alg;
 	std::vector<std::vector<double>> Y = alg.zeromat(y.size(), y.size());
 	for (int i = 0; i < y.size(); i++) {
@@ -11,7 +11,7 @@
 #include <vector>
 
 
-class Cost {
+class MLPPCost {
 public:
 	// Regression Costs
 	double MSE(std::vector<double> y_hat, std::vector<double> y);
@@ -32,7 +32,7 @@ double DualSVC::modelTest(std::vector<double> x) {
 }
 
 void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
 	Reg regularization;
@@ -80,7 +80,7 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 }
 
 // void DualSVC::SGD(double learning_rate, int max_epoch, bool UI){
-// 	class Cost cost;
+// 	class MLPPCost cost;
 // 	MLPPActivation avn;
 // 	LinAlg alg;
 // 	Reg regularization;
@@ -113,7 +113,7 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 // }
 
 // void DualSVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
-// 	class Cost cost;
+// 	class MLPPCost cost;
 // 	MLPPActivation avn;
 // 	LinAlg alg;
 // 	Reg regularization;
@@ -163,7 +163,7 @@ void DualSVC::save(std::string fileName) {
 }
 
 double DualSVC::Cost(std::vector<double> alpha, std::vector<std::vector<double>> X, std::vector<double> y) {
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.dualFormSVM(alpha, X, y);
 }
 
@@ -205,7 +205,7 @@ void ExpReg::save(std::string fileName) {
 
 double ExpReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
 
@@ -29,7 +29,7 @@ std::vector<std::vector<double>> GAN::generateExample(int n) {
 }
 
 void GAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -147,7 +147,7 @@ std::vector<double> GAN::modelSetTestDiscriminator(std::vector<std::vector<doubl
 
 double GAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	double totalRegTerm = 0;
 
 	auto cost_function = outputLayer->cost_map[outputLayer->cost];
@@ -208,7 +208,7 @@ void GAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>>
 }
 
 std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> GAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
-	class Cost cost;
+	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
 	Reg regularization;
@@ -244,7 +244,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> G
 }
 
 std::vector<std::vector<std::vector<double>>> GAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
-	class Cost cost;
+	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
 	Reg regularization;
@@ -219,7 +219,7 @@ void LinReg::save(std::string fileName) {
 
 double LinReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
 
@@ -182,7 +182,7 @@ void LogReg::save(std::string fileName) {
 
 double LogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.LogLoss(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
 
@@ -53,7 +53,7 @@ std::vector<double> MANN::modelTest(std::vector<double> x) {
 }
 
 void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
 	Reg regularization;
@@ -159,7 +159,7 @@ void MANN::addOutputLayer(std::string activation, std::string loss, std::string
 
 double MANN::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	double totalRegTerm = 0;
 
 	auto cost_function = outputLayer->cost_map[outputLayer->cost];
@@ -227,7 +227,7 @@ void MLP::save(std::string fileName) {
 
 double MLP::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.LogLoss(y_hat, y) + regularization.regTerm(weights2, lambda, alpha, reg) + regularization.regTerm(weights1, lambda, alpha, reg);
 }
 
@@ -98,22 +98,22 @@ MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activ
 	activation_map["Arcoth"] = &MLPPActivation::arcoth;
 	activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
 
-	costDeriv_map["MSE"] = &Cost::MSEDeriv;
-	cost_map["MSE"] = &Cost::MSE;
-	costDeriv_map["RMSE"] = &Cost::RMSEDeriv;
-	cost_map["RMSE"] = &Cost::RMSE;
-	costDeriv_map["MAE"] = &Cost::MAEDeriv;
-	cost_map["MAE"] = &Cost::MAE;
-	costDeriv_map["MBE"] = &Cost::MBEDeriv;
-	cost_map["MBE"] = &Cost::MBE;
-	costDeriv_map["LogLoss"] = &Cost::LogLossDeriv;
-	cost_map["LogLoss"] = &Cost::LogLoss;
-	costDeriv_map["CrossEntropy"] = &Cost::CrossEntropyDeriv;
-	cost_map["CrossEntropy"] = &Cost::CrossEntropy;
-	costDeriv_map["HingeLoss"] = &Cost::HingeLossDeriv;
-	cost_map["HingeLoss"] = &Cost::HingeLoss;
-	costDeriv_map["WassersteinLoss"] = &Cost::HingeLossDeriv;
-	cost_map["WassersteinLoss"] = &Cost::HingeLoss;
+	costDeriv_map["MSE"] = &MLPPCost::MSEDeriv;
+	cost_map["MSE"] = &MLPPCost::MSE;
+	costDeriv_map["RMSE"] = &MLPPCost::RMSEDeriv;
+	cost_map["RMSE"] = &MLPPCost::RMSE;
+	costDeriv_map["MAE"] = &MLPPCost::MAEDeriv;
+	cost_map["MAE"] = &MLPPCost::MAE;
+	costDeriv_map["MBE"] = &MLPPCost::MBEDeriv;
+	cost_map["MBE"] = &MLPPCost::MBE;
+	costDeriv_map["LogLoss"] = &MLPPCost::LogLossDeriv;
+	cost_map["LogLoss"] = &MLPPCost::LogLoss;
+	costDeriv_map["CrossEntropy"] = &MLPPCost::CrossEntropyDeriv;
+	cost_map["CrossEntropy"] = &MLPPCost::CrossEntropy;
+	costDeriv_map["HingeLoss"] = &MLPPCost::HingeLossDeriv;
+	cost_map["HingeLoss"] = &MLPPCost::HingeLoss;
+	costDeriv_map["WassersteinLoss"] = &MLPPCost::HingeLossDeriv;
+	cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss;
 }
 
 void MultiOutputLayer::forwardPass() {
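Two things to notice in the layer constructors. First, the maps hold pointers to MLPPCost member functions keyed by loss name, which is why every entry changes when the class is renamed. Second, the "WassersteinLoss" entries point at the Hinge functions both before and after this commit, so that apparent copy-paste slip predates the rename. A minimal self-contained demo of the pointer-to-member-map pattern the layers use:

#include <iostream>
#include <map>
#include <string>
#include <vector>

struct MLPPCost {
	double MSE(std::vector<double> y_hat, std::vector<double> y) {
		double sum = 0;
		for (size_t i = 0; i < y_hat.size(); i++) {
			sum += (y_hat[i] - y[i]) * (y_hat[i] - y[i]);
		}
		return sum / (2 * y_hat.size());
	}
};

int main() {
	// Same shape as the layers' cost_map: loss name -> pointer to member.
	std::map<std::string, double (MLPPCost::*)(std::vector<double>, std::vector<double>)> cost_map;
	cost_map["MSE"] = &MLPPCost::MSE;

	MLPPCost cost;
	std::vector<double> y_hat = { 0.9, 0.2 }, y = { 1.0, 0.0 };
	std::cout << (cost.*cost_map["MSE"])(y_hat, y) << std::endl; // prints 0.0125
	return 0;
}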
@@ -35,8 +35,8 @@ public:
 
 	std::map<std::string, std::vector<std::vector<double>> (MLPPActivation::*)(std::vector<std::vector<double>>, bool)> activation_map;
 	std::map<std::string, std::vector<double> (MLPPActivation::*)(std::vector<double>, bool)> activationTest_map;
-	std::map<std::string, double (Cost::*)(std::vector<std::vector<double>>, std::vector<std::vector<double>>)> cost_map;
-	std::map<std::string, std::vector<std::vector<double>> (Cost::*)(std::vector<std::vector<double>>, std::vector<std::vector<double>>)> costDeriv_map;
+	std::map<std::string, double (MLPPCost::*)(std::vector<std::vector<double>>, std::vector<std::vector<double>>)> cost_map;
+	std::map<std::string, std::vector<std::vector<double>> (MLPPCost::*)(std::vector<std::vector<double>>, std::vector<std::vector<double>>)> costDeriv_map;
 
 	std::vector<double> z_test;
 	std::vector<double> a_test;
@@ -95,22 +95,22 @@ OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost,
 	activation_map["Arcoth"] = &MLPPActivation::arcoth;
 	activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
 
-	costDeriv_map["MSE"] = &Cost::MSEDeriv;
-	cost_map["MSE"] = &Cost::MSE;
-	costDeriv_map["RMSE"] = &Cost::RMSEDeriv;
-	cost_map["RMSE"] = &Cost::RMSE;
-	costDeriv_map["MAE"] = &Cost::MAEDeriv;
-	cost_map["MAE"] = &Cost::MAE;
-	costDeriv_map["MBE"] = &Cost::MBEDeriv;
-	cost_map["MBE"] = &Cost::MBE;
-	costDeriv_map["LogLoss"] = &Cost::LogLossDeriv;
-	cost_map["LogLoss"] = &Cost::LogLoss;
-	costDeriv_map["CrossEntropy"] = &Cost::CrossEntropyDeriv;
-	cost_map["CrossEntropy"] = &Cost::CrossEntropy;
-	costDeriv_map["HingeLoss"] = &Cost::HingeLossDeriv;
-	cost_map["HingeLoss"] = &Cost::HingeLoss;
-	costDeriv_map["WassersteinLoss"] = &Cost::HingeLossDeriv;
-	cost_map["WassersteinLoss"] = &Cost::HingeLoss;
+	costDeriv_map["MSE"] = &MLPPCost::MSEDeriv;
+	cost_map["MSE"] = &MLPPCost::MSE;
+	costDeriv_map["RMSE"] = &MLPPCost::RMSEDeriv;
+	cost_map["RMSE"] = &MLPPCost::RMSE;
+	costDeriv_map["MAE"] = &MLPPCost::MAEDeriv;
+	cost_map["MAE"] = &MLPPCost::MAE;
+	costDeriv_map["MBE"] = &MLPPCost::MBEDeriv;
+	cost_map["MBE"] = &MLPPCost::MBE;
+	costDeriv_map["LogLoss"] = &MLPPCost::LogLossDeriv;
+	cost_map["LogLoss"] = &MLPPCost::LogLoss;
+	costDeriv_map["CrossEntropy"] = &MLPPCost::CrossEntropyDeriv;
+	cost_map["CrossEntropy"] = &MLPPCost::CrossEntropy;
+	costDeriv_map["HingeLoss"] = &MLPPCost::HingeLossDeriv;
+	cost_map["HingeLoss"] = &MLPPCost::HingeLoss;
+	costDeriv_map["WassersteinLoss"] = &MLPPCost::HingeLossDeriv;
+	cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss;
 }
 
 void OutputLayer::forwardPass() {
@@ -34,8 +34,8 @@ public:
 
 	std::map<std::string, std::vector<double> (MLPPActivation::*)(std::vector<double>, bool)> activation_map;
 	std::map<std::string, double (MLPPActivation::*)(double, bool)> activationTest_map;
-	std::map<std::string, double (Cost::*)(std::vector<double>, std::vector<double>)> cost_map;
-	std::map<std::string, std::vector<double> (Cost::*)(std::vector<double>, std::vector<double>)> costDeriv_map;
+	std::map<std::string, double (MLPPCost::*)(std::vector<double>, std::vector<double>)> cost_map;
+	std::map<std::string, std::vector<double> (MLPPCost::*)(std::vector<double>, std::vector<double>)> costDeriv_map;
 
 	double z_test;
 	double a_test;
@@ -209,7 +209,7 @@ void ProbitReg::save(std::string fileName) {
 
 double ProbitReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
 
@@ -246,7 +246,7 @@ std::vector<std::vector<double>> SoftmaxNet::getEmbeddings() {
 double SoftmaxNet::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	Reg regularization;
 	Data data;
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights1, lambda, alpha, reg) + regularization.regTerm(weights2, lambda, alpha, reg);
 }
 
@@ -165,7 +165,7 @@ void SoftmaxReg::save(std::string fileName) {
 
 double SoftmaxReg::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
 
@@ -31,7 +31,7 @@ double SVC::modelTest(std::vector<double> x) {
 }
 
 void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
 	Reg regularization;
@@ -64,7 +64,7 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 }
 
 void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
 	Reg regularization;
@@ -107,7 +107,7 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
 }
 
 void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
 	Reg regularization;
@@ -159,7 +159,7 @@ void SVC::save(std::string fileName) {
 }
 
 double SVC::Cost(std::vector<double> z, std::vector<double> y, std::vector<double> weights, double C) {
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.HingeLoss(z, y, weights, C);
 }
 
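SVC::Cost composes the regularized objective from the pieces renamed above: `C * HingeLoss(z, y)` plus `regTerm(weights, 1, 0, "Ridge")`. Assuming the Ridge term follows the usual ½λ‖w‖² convention with λ = 1, the whole thing is the classic soft-margin SVM objective. A standalone sketch under that assumption:

#include <algorithm>
#include <vector>

// Soft-margin SVM objective: C * mean hinge loss + 0.5 * ||w||^2.
// The 0.5 factor is an assumption about regTerm's "Ridge" convention.
double svmObjective(const std::vector<double> &z, const std::vector<double> &y,
		const std::vector<double> &weights, double C) {
	double hinge = 0;
	for (size_t i = 0; i < z.size(); i++) {
		hinge += std::max(0.0, 1 - y[i] * z[i]);
	}
	hinge /= z.size();
	double ridge = 0;
	for (double w : weights) {
		ridge += 0.5 * w * w;
	}
	return C * hinge + ridge;
}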
@@ -158,7 +158,7 @@ void TanhReg::save(std::string fileName) {
 
 double TanhReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
 
@@ -29,7 +29,7 @@ std::vector<std::vector<double>> WGAN::generateExample(int n) {
 }
 
 void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
-	class Cost cost;
+	class MLPPCost cost;
 	LinAlg alg;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -156,7 +156,7 @@ std::vector<double> WGAN::modelSetTestDiscriminator(std::vector<std::vector<doub
 
 double WGAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
-	class Cost cost;
+	class MLPPCost cost;
 	double totalRegTerm = 0;
 
 	auto cost_function = outputLayer->cost_map[outputLayer->cost];
@@ -217,7 +217,7 @@ void WGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>
 }
 
 std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> WGAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
-	class Cost cost;
+	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
 	Reg regularization;
@@ -253,7 +253,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> W
 }
 
 std::vector<std::vector<std::vector<double>>> WGAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
-	class Cost cost;
+	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
 	Reg regularization;