Added LinAlg.full & LinAlg.zerovec, changed implementation of LinAlg.onevec & LinAlg.onemat, “vectorized” csch, sech, coth

novak_99 2021-05-26 17:09:49 -07:00
parent 558e138948
commit 7ae3b348b9
5 changed files with 61 additions and 108 deletions

View File

@@ -570,78 +570,34 @@ namespace MLPP{
}
std::vector<double> Activation::csch(std::vector<double> z, bool deriv){
-if(deriv){
-std::vector<double> deriv;
-deriv.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-deriv[i] = csch(z[i], 1);
-}
-return deriv;
-}
-std::vector<double> a;
-a.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-a[i] = csch(z[i]);
-}
-return a;
+LinAlg alg;
+if(deriv){ return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z)); }
+return alg.elementWiseDivision(alg.onevec(z.size()), sinh(z));
}
std::vector<std::vector<double>> Activation::csch(std::vector<std::vector<double>> z, bool deriv){
-if(deriv){
-std::vector<std::vector<double>> deriv;
-deriv.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-deriv[i] = csch(z[i], 1);
-}
-return deriv;
-}
-std::vector<std::vector<double>> a;
-a.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-a[i] = csch(z[i]);
-}
-return a;
+LinAlg alg;
+if(deriv){ return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z)); }
+return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), sinh(z));
}
double Activation::sech(double z, bool deriv){
if(deriv){ return -sech(z) * tanh(z); }
-return 2 / (exp(z) + exp(-z));
+return 1 / cosh(z);
}
std::vector<double> Activation::sech(std::vector<double> z, bool deriv){
-if(deriv){
-std::vector<double> deriv;
-deriv.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-deriv[i] = sech(z[i], 1);
-}
-return deriv;
-}
-std::vector<double> a;
-a.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-a[i] = sech(z[i]);
-}
-return a;
+LinAlg alg;
+if(deriv){ return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z)); }
+return alg.elementWiseDivision(alg.onevec(z.size()), cosh(z));
// return activation(z, deriv, static_cast<void (*)(double, bool)>(&sech));
}
std::vector<std::vector<double>> Activation::sech(std::vector<std::vector<double>> z, bool deriv){
-if(deriv){
-std::vector<std::vector<double>> deriv;
-deriv.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-deriv[i] = sech(z[i], 1);
-}
-return deriv;
-}
-std::vector<std::vector<double>> a;
-a.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-a[i] = sech(z[i]);
-}
-return a;
+LinAlg alg;
+if(deriv){ return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z)); }
+return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), cosh(z));
// return activation(z, deriv, static_cast<void (*)(double, bool)>(&sech));
}
@@ -653,37 +609,15 @@ namespace MLPP{
}
std::vector<double> Activation::coth(std::vector<double> z, bool deriv){
-if(deriv){
-std::vector<double> deriv;
-deriv.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-deriv[i] = coth(z[i], 1);
-}
-return deriv;
-}
-std::vector<double> a;
-a.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-a[i] = coth(z[i]);
-}
-return a;
+LinAlg alg;
+if(deriv){ return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z)); }
+return alg.elementWiseDivision(alg.onevec(z.size()), tanh(z));
}
std::vector<std::vector<double>> Activation::coth(std::vector<std::vector<double>> z, bool deriv){
-if(deriv){
-std::vector<std::vector<double>> deriv;
-deriv.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-deriv[i] = coth(z[i], 1);
-}
-return deriv;
-}
-std::vector<std::vector<double>> a;
-a.resize(z.size());
-for(int i = 0; i < z.size(); i++){
-a[i] = coth(z[i]);
-}
-return a;
+LinAlg alg;
+if(deriv){ return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z)); }
+return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), tanh(z));
}
double Activation::arsinh(double z, bool deriv){

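The deriv branches above encode the scalar identities csch'(z) = -csch(z)coth(z), sech'(z) = -sech(z)tanh(z), and coth'(z) = -csch^2(z), applied element-wise through hadamard_product and scalarMultiply. As a quick sanity check of those identities, here is a standalone finite-difference sketch in plain C++ that does not use the MLPP classes:

```cpp
#include <cmath>
#include <iostream>

int main() {
    double z = 0.7, h = 1e-6;
    double csch = 1.0 / std::sinh(z);
    double sech = 1.0 / std::cosh(z);
    // Central finite differences of csch, sech, and coth at z.
    double csch_fd = (1.0 / std::sinh(z + h) - 1.0 / std::sinh(z - h)) / (2 * h);
    double sech_fd = (1.0 / std::cosh(z + h) - 1.0 / std::cosh(z - h)) / (2 * h);
    double coth_fd = (1.0 / std::tanh(z + h) - 1.0 / std::tanh(z - h)) / (2 * h);
    std::cout << csch_fd << " ~ " << -csch / std::tanh(z) << std::endl; // -csch(z)coth(z)
    std::cout << sech_fd << " ~ " << -sech * std::tanh(z) << std::endl; // -sech(z)tanh(z)
    std::cout << coth_fd << " ~ " << -csch * csch << std::endl;         // -csch^2(z)
    return 0;
}
```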
View File

@@ -300,17 +300,21 @@ namespace MLPP{
}
std::vector<std::vector<double>> LinAlg::onemat(int n, int m){
-std::vector<std::vector<double>> onemat;
-onemat.resize(n);
-for(int i = 0; i < onemat.size(); i++){
-onemat[i].resize(m);
+return full(n, m, 1);
+}
+std::vector<std::vector<double>> LinAlg::full(int n, int m, int k){
+std::vector<std::vector<double>> full;
+full.resize(n);
+for(int i = 0; i < full.size(); i++){
+full[i].resize(m);
}
-for(int i = 0; i < onemat.size(); i++){
-for(int j = 0; j < onemat[i].size(); j++){
-onemat[i][j] = 1;
+for(int i = 0; i < full.size(); i++){
+for(int j = 0; j < full[i].size(); j++){
+full[i][j] = k;
}
}
-return onemat;
+return full;
}
std::vector<std::vector<double>> LinAlg::round(std::vector<std::vector<double>> A){
@@ -640,13 +644,23 @@ namespace MLPP{
return c;
}
+std::vector<double> LinAlg::zerovec(int n){
+std::vector<double> zerovec;
+zerovec.resize(n);
+return zerovec;
+}
std::vector<double> LinAlg::onevec(int n){
-std::vector<double> onevec;
-onevec.resize(n);
-for(int i = 0; i < onevec.size(); i++){
-onevec[i] = 1;
+return full(n, 1);
+}
+std::vector<double> LinAlg::full(int n, int k){
+std::vector<double> full;
+full.resize(n);
+for(int i = 0; i < full.size(); i++){
+full[i] = k;
}
-return onevec;
+return full;
}
double LinAlg::max(std::vector<double> a){

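For reference, a minimal usage sketch of the helpers touched in this file. The signatures come from the hunks above; the include path is an assumption, and printVector/printMatrix are the display helpers already used in the test file below.

```cpp
#include <vector>
#include "LinAlg/LinAlg.hpp" // include path assumed

int main() {
    MLPP::LinAlg alg;
    std::vector<double> threes = alg.full(5, 3);                  // {3, 3, 3, 3, 3}
    std::vector<double> ones   = alg.onevec(4);                   // now delegates to full(4, 1)
    std::vector<double> zeros  = alg.zerovec(4);                  // zero-initialized via resize(n)
    std::vector<std::vector<double>> sevens = alg.full(2, 3, 7);  // 2x3 matrix filled with 7
    std::vector<std::vector<double>> onemat = alg.onemat(2, 3);   // now delegates to full(2, 3, 1)
    alg.printVector(threes);
    alg.printVector(ones);
    alg.printVector(zeros);
    alg.printMatrix(sevens);
    alg.printMatrix(onemat);
    return 0;
}
```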
View File

@@ -58,6 +58,8 @@ namespace MLPP{
std::vector<std::vector<double>> onemat(int n, int m);
+std::vector<std::vector<double>> full(int n, int m, int k);
std::vector<std::vector<double>> round(std::vector<std::vector<double>> A);
std::vector<std::vector<double>> identity(double d);
@@ -102,8 +104,12 @@ namespace MLPP{
double dot(std::vector<double> a, std::vector<double> b);
+std::vector<double> zerovec(int n);
std::vector<double> onevec(int n);
+std::vector<double> full(int n, int k);
double max(std::vector<double> a);
double min(std::vector<double> a);

BIN
a.out

Binary file not shown.

View File

@@ -9,7 +9,7 @@
// POLYMORPHIC IMPLEMENTATION OF REGRESSION CLASSES
// EXTEND SGD/MBGD SUPPORT FOR DYN. SIZED ANN
// STANDARDIZE ACTIVATIONS/OPTIMIZATIONS
-// ADD LEAKYRELU, ELU TO ANN
+// ADD LEAKYRELU, ELU, SELU TO ANN
// HYPOTHESIS TESTING CLASS
// GAUSS MARKOV CHECKER CLASS
@@ -348,21 +348,20 @@ int main() {
// OutlierFinder outlierFinder(2); // Any datapoint outside of 2 stds from the mean is marked as an outlier.
// alg.printVector(outlierFinder.modelTest(inputSet));
-// // Testing for new Functions
+// // Testing new Functions
// double z_s = 4;
-// std::cout << avn.sigmoid(z_s) << std::endl;
-// std::cout << avn.sigmoid(z_s, 1) << std::endl;
+// std::cout << avn.coth(z_s) << std::endl;
+// std::cout << avn.coth(z_s, 1) << std::endl;
// std::vector<double> z_v = {4, 5};
-// alg.printVector(avn.sigmoid(z_v));
-// alg.printVector(avn.sigmoid(z_v, 1));
+// alg.printVector(avn.coth(z_v));
+// alg.printVector(avn.coth(z_v, 1));
// std::vector<std::vector<double>> Z_m = {{4, 5}};
-// alg.printMatrix(avn.sigmoid(Z_m));
-// alg.printMatrix(avn.sigmoid(Z_m, 1));
+// alg.printMatrix(avn.coth(Z_m));
+// alg.printMatrix(avn.coth(Z_m, 1));
-// alg.printMatrix(alg.pinverse({{1,2}, {3,4}}));
+// // alg.printMatrix(alg.pinverse({{1,2}, {3,4}}));
return 0;
}
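The commented-out block above swaps the earlier sigmoid calls for coth to exercise the newly vectorized overloads. A standalone cross-check of the vectorized coth derivative against the scalar overload could look like the sketch below; the include paths are assumptions, while the avn/alg object names and the printVector call mirror the test file.

```cpp
#include <iostream>
#include <vector>
#include "Activation/Activation.hpp" // include paths assumed
#include "LinAlg/LinAlg.hpp"

int main() {
    MLPP::Activation avn;
    MLPP::LinAlg alg;
    std::vector<double> z_v = {4, 5};
    alg.printVector(avn.coth(z_v, 1));            // vectorized: element-wise -csch(z) * csch(z)
    for (double z : z_v) {
        std::cout << avn.coth(z, 1) << std::endl; // scalar overload, element by element
    }
    return 0;
}
```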