mirror of https://github.com/Relintai/pmlpp.git
synced 2024-12-22 15:06:47 +01:00

Now MLPPANN uses engine classes.

commit 1224116e12 (parent 1b3606c7ae)
mlpp/ann/ann.cpp — 688 changed lines (file diff suppressed because it is too large)
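At a glance: this commit swaps the std::string/std::vector based MLPPANN API for engine types (Ref<MLPPMatrix>, Ref<MLPPVector>, and enums). A before/after sketch assembled from the hunks below; the surrounding names (alg, inputSet, outputSet) are taken from the updated tests, and this is an illustration rather than a complete program:

	// Before this commit (removed API):
	//   MLPPANN ann(alg.transpose(inputSet), outputSet);
	//   ann.add_layer(2, "Cosh");
	//   ann.add_output_layer("Sigmoid", "LogLoss");
	//   ann.set_learning_rate_scheduler_drop("Step", 0.5, 1000);

	// After this commit (engine classes and enums):
	Ref<MLPPMatrix> input_set;
	input_set.instance();
	input_set->set_from_std_vectors(inputSet); // inputSet: std::vector<std::vector<real_t>>

	Ref<MLPPVector> output_set;
	output_set.instance();
	output_set->set_from_std_vector(outputSet); // outputSet: std::vector<real_t>

	MLPPANN ann(alg.transposem(input_set), output_set);
	ann.add_layer(2, MLPPActivation::ACTIVATION_FUNCTION_COSH);
	ann.add_output_layer(MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, MLPPCost::COST_TYPE_LOGISTIC_LOSS);
	ann.set_learning_rate_scheduler_drop(MLPPANN::SCHEDULER_TYPE_STEP, 0.5, 1000);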
@@ -11,22 +11,32 @@
#include "core/object/reference.h"

#include "../lin_alg/mlpp_matrix.h"
#include "../lin_alg/mlpp_vector.h"

#include "../hidden_layer/hidden_layer.h"
#include "../output_layer/output_layer.h"

#include "../hidden_layer/hidden_layer_old.h"
#include "../output_layer/output_layer_old.h"

#include <string>
#include <tuple>
#include <vector>

#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

class MLPPANN : public Reference {
	GDCLASS(MLPPANN, Reference);

public:
	std::vector<real_t> model_set_test(std::vector<std::vector<real_t>> X);
	real_t model_test(std::vector<real_t> x);

	enum SchedulerType {
		SCHEDULER_TYPE_NONE = 0,
		SCHEDULER_TYPE_TIME,
		SCHEDULER_TYPE_EPOCH,
		SCHEDULER_TYPE_STEP,
		SCHEDULER_TYPE_EXPONENTIAL,
	};

public:
	Ref<MLPPVector> model_set_test(const Ref<MLPPMatrix> &X);
	real_t model_test(const Ref<MLPPVector> &x);

	void gradient_descent(real_t learning_rate, int max_epoch, bool ui = false);
	void sgd(real_t learning_rate, int max_epoch, bool ui = false);
@@ -40,15 +50,15 @@ public:
	void amsgrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool ui = false);

	real_t score();

	void save(std::string file_name);
	void save(const String &file_name);

	void set_learning_rate_scheduler(std::string type, real_t decay_constant);
	void set_learning_rate_scheduler_drop(std::string type, real_t decay_constant, real_t drop_rate);
	void set_learning_rate_scheduler(SchedulerType type, real_t decay_constant);
	void set_learning_rate_scheduler_drop(SchedulerType type, real_t decay_constant, real_t drop_rate);

	void add_layer(int n_hidden, std::string activation, std::string weight_init = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
	void add_output_layer(std::string activation, std::string loss, std::string weight_init = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
	void add_layer(int n_hidden, MLPPActivation::ActivationFunction activation, MLPPUtilities::WeightDistributionType weight_init = MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT, MLPPReg::RegularizationType reg = MLPPReg::REGULARIZATION_TYPE_NONE, real_t lambda = 0.5, real_t alpha = 0.5);
	void add_output_layer(MLPPActivation::ActivationFunction activation, MLPPCost::CostTypes loss, MLPPUtilities::WeightDistributionType weight_init = MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT, MLPPReg::RegularizationType reg = MLPPReg::REGULARIZATION_TYPE_NONE, real_t lambda = 0.5, real_t alpha = 0.5);

	MLPPANN(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set);
	MLPPANN(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set);

	MLPPANN();
	~MLPPANN();
@@ -56,29 +66,37 @@ public:
protected:
	real_t apply_learning_rate_scheduler(real_t learning_rate, real_t decay_constant, real_t epoch, real_t drop_rate);

	real_t cost(std::vector<real_t> y_hat, std::vector<real_t> y);
	real_t cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);

	void forward_pass();

	void update_parameters(std::vector<std::vector<std::vector<real_t>>> hidden_layer_updations, std::vector<real_t> output_layer_updation, real_t learning_rate);
	std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> compute_gradients(std::vector<real_t> y_hat, std::vector<real_t> _output_set);
	void update_parameters(const Vector<Ref<MLPPMatrix>> &hidden_layer_updations, const Ref<MLPPVector> &output_layer_updation, real_t learning_rate);

	void print_ui(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> p_output_set);

	struct ComputeGradientsResult {
		Vector<Ref<MLPPMatrix>> cumulative_hidden_layer_w_grad;
		Ref<MLPPVector> output_w_grad;
	};

	ComputeGradientsResult compute_gradients(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &_output_set);

	void print_ui(int epoch, real_t cost_prev, const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &p_output_set);

	static void _bind_methods();

	std::vector<std::vector<real_t>> _input_set;
	std::vector<real_t> _output_set;
	std::vector<real_t> _y_hat;
	Ref<MLPPMatrix> _input_set;
	Ref<MLPPVector> _output_set;
	Ref<MLPPVector> _y_hat;

	std::vector<MLPPOldHiddenLayer> _network;
	MLPPOldOutputLayer *_output_layer;
	Vector<Ref<MLPPHiddenLayer>> _network;
	Ref<MLPPOutputLayer> _output_layer;

	int _n;
	int _k;

	std::string _lr_scheduler;
	SchedulerType _lr_scheduler;
	real_t _decay_constant;
	real_t _drop_rate;
};

VARIANT_ENUM_CAST(MLPPANN::SchedulerType);

#endif /* ANN_hpp */
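The SchedulerType values replace the old string names ("Time", "Epoch", "Step", "Exponential"). The decay formulas live in apply_learning_rate_scheduler, whose body is not part of this diff, so the sketch below gives the conventional forms these names usually denote; treat the exact expressions as assumptions rather than the module's confirmed behavior:

	// Hedged sketch of the usual decay schedules behind each SchedulerType name.
	// The authoritative forms are in MLPPANN::apply_learning_rate_scheduler (not shown here).
	#include <cmath>

	real_t scheduled_rate(MLPPANN::SchedulerType type, real_t learning_rate, real_t decay_constant, real_t epoch, real_t drop_rate) {
		switch (type) {
			case MLPPANN::SCHEDULER_TYPE_TIME: // 1/t decay
				return learning_rate / (1 + decay_constant * epoch);
			case MLPPANN::SCHEDULER_TYPE_EPOCH: // shrink with the square root of the epoch
				return learning_rate * (decay_constant / std::sqrt(epoch));
			case MLPPANN::SCHEDULER_TYPE_STEP: // drop by a constant factor every drop_rate epochs
				return learning_rate * std::pow(decay_constant, std::floor((1 + epoch) / drop_rate));
			case MLPPANN::SCHEDULER_TYPE_EXPONENTIAL:
				return learning_rate * std::exp(-decay_constant * epoch);
			default: // SCHEDULER_TYPE_NONE
				return learning_rate;
		}
	}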
@@ -142,9 +142,9 @@ void MLPPGAN::add_output_layer(MLPPUtilities::WeightDistributionType weight_init
	MLPPLinAlg alg;

	if (!_network.empty()) {
		_output_layer = Ref<MLPPOutputLayer>(memnew(MLPPOutputLayer(_network.write[_network.size() - 1]->get_n_hidden(), MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, _network.write[_network.size() - 1]->get_a(), weight_init, reg, lambda, alpha)));
		_output_layer = Ref<MLPPOutputLayer>(memnew(MLPPOutputLayer(_network.write[_network.size() - 1]->get_n_hidden(), MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, MLPPCost::COST_TYPE_LOGISTIC_LOSS, _network.write[_network.size() - 1]->get_a(), weight_init, reg, lambda, alpha)));
	} else {
		_output_layer = Ref<MLPPOutputLayer>(memnew(MLPPOutputLayer(_k, MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, alg.gaussian_noise(_n, _k), weight_init, reg, lambda, alpha)));
		_output_layer = Ref<MLPPOutputLayer>(memnew(MLPPOutputLayer(_k, MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, MLPPCost::COST_TYPE_LOGISTIC_LOSS, alg.gaussian_noise(_n, _k), weight_init, reg, lambda, alpha)));
	}
}
@@ -2499,6 +2499,24 @@ std::vector<std::vector<real_t>> MLPPLinAlg::max(std::vector<std::vector<real_t>
	return C;
}

Ref<MLPPMatrix> MLPPLinAlg::max_nm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
	Ref<MLPPMatrix> C;
	C.instance();
	C->resize(A->size());

	const real_t *a_ptr = A->ptr();
	const real_t *b_ptr = B->ptr();
	real_t *c_ptr = C->ptrw();

	int size = A->data_size();

	for (int i = 0; i < size; i++) {
		c_ptr[i] = MAX(a_ptr[i], b_ptr[i]);
	}

	return C;
}
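max_nm follows the flat-buffer pattern the new engine classes use throughout: size the result from A, then walk the contiguous storage via ptr()/ptrw(). A hedged usage sketch (assumes A and B share dimensions, which max_nm itself does not verify):

	// Hedged usage sketch for max_nm; matrix setup mirrors the updated tests below.
	std::vector<std::vector<real_t>> a_data = { { 1, 5 }, { 3, 2 } };
	std::vector<std::vector<real_t>> b_data = { { 4, 0 }, { 1, 6 } };

	Ref<MLPPMatrix> a;
	a.instance();
	a->set_from_std_vectors(a_data);

	Ref<MLPPMatrix> b;
	b.instance();
	b->set_from_std_vectors(b_data);

	MLPPLinAlg alg;
	Ref<MLPPMatrix> c = alg.max_nm(a, b); // element-wise maximum: { { 4, 5 }, { 3, 6 } }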
real_t MLPPLinAlg::max(std::vector<real_t> a) {
	real_t max = a[0];
	for (uint32_t i = 0; i < a.size(); i++) {
@@ -2749,6 +2767,17 @@ std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::addition(std::vector<s
	return A;
}

Vector<Ref<MLPPMatrix>> MLPPLinAlg::addition_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
	Vector<Ref<MLPPMatrix>> res;
	res.resize(A.size());

	for (int i = 0; i < res.size(); i++) {
		res.write[i] = additionm(A[i], B[i]);
	}

	return res;
}

std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::elementWiseDivision(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B) {
	for (uint32_t i = 0; i < A.size(); i++) {
		A[i] = elementWiseDivision(A[i], B[i]);
@@ -2756,6 +2785,17 @@ std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::elementWiseDivision(st
	return A;
}

Vector<Ref<MLPPMatrix>> MLPPLinAlg::element_wise_division_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
	Vector<Ref<MLPPMatrix>> res;
	res.resize(A.size());

	for (int i = 0; i < A.size(); i++) {
		res.write[i] = element_wise_divisionm(A[i], B[i]);
	}

	return res;
}

std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::sqrt(std::vector<std::vector<std::vector<real_t>>> A) {
	for (uint32_t i = 0; i < A.size(); i++) {
		A[i] = sqrt(A[i]);
@@ -2763,6 +2803,17 @@ std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::sqrt(std::vector<std::
	return A;
}

Vector<Ref<MLPPMatrix>> MLPPLinAlg::sqrt_vt(const Vector<Ref<MLPPMatrix>> &A) {
	Vector<Ref<MLPPMatrix>> res;
	res.resize(A.size());

	for (int i = 0; i < A.size(); i++) {
		res.write[i] = sqrtm(A[i]);
	}

	return res;
}

std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::exponentiate(std::vector<std::vector<std::vector<real_t>>> A, real_t p) {
	for (uint32_t i = 0; i < A.size(); i++) {
		A[i] = exponentiate(A[i], p);
@@ -2770,6 +2821,17 @@ std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::exponentiate(std::vect
	return A;
}

Vector<Ref<MLPPMatrix>> MLPPLinAlg::exponentiate_vt(const Vector<Ref<MLPPMatrix>> &A, real_t p) {
	Vector<Ref<MLPPMatrix>> res;
	res.resize(A.size());

	for (int i = 0; i < A.size(); i++) {
		res.write[i] = exponentiatem(A[i], p);
	}

	return res;
}

std::vector<std::vector<real_t>> MLPPLinAlg::tensor_vec_mult(std::vector<std::vector<std::vector<real_t>>> A, std::vector<real_t> b) {
	std::vector<std::vector<real_t>> C;
	C.resize(A.size());
@@ -2840,6 +2902,21 @@ std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::resize(std::vector<std
	return A;
}

Vector<Ref<MLPPMatrix>> MLPPLinAlg::resize_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
	Vector<Ref<MLPPMatrix>> res;
	res.resize(B.size());

	for (int i = 0; i < res.size(); i++) {
		Ref<MLPPMatrix> m;
		m.instance();
		m->resize(B[i]->size());

		res.write[i] = m;
	}

	return res;
}

std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::max(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B) {
	for (uint32_t i = 0; i < A.size(); i++) {
		A[i] = max(A[i], B[i]);
@@ -2847,6 +2924,17 @@ std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::max(std::vector<std::v
	return A;
}

Vector<Ref<MLPPMatrix>> MLPPLinAlg::max_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
	Vector<Ref<MLPPMatrix>> res;
	res.resize(A.size());

	for (int i = 0; i < A.size(); i++) {
		res.write[i] = max_nm(A[i], B[i]);
	}

	return res;
}

std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::abs(std::vector<std::vector<std::vector<real_t>>> A) {
	for (uint32_t i = 0; i < A.size(); i++) {
		A[i] = abs(A[i]);
@@ -2854,6 +2942,17 @@ std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::abs(std::vector<std::v
	return A;
}

Vector<Ref<MLPPMatrix>> MLPPLinAlg::abs_vt(const Vector<Ref<MLPPMatrix>> &A) {
	Vector<Ref<MLPPMatrix>> res;
	res.resize(A.size());

	for (int i = 0; i < A.size(); i++) {
		res.write[i] = absm(A[i]);
	}

	return res;
}
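These _vt helpers mirror the old std::vector tensor overloads one-for-one so MLPPANN's optimizers (adam, adadelta, amsgrad) can keep treating the per-layer weight gradients as a single tensor. A hedged sketch of an AMSGrad-flavored step built only from the helpers added in this series; the names and the omission of decay terms are illustrative, not the module's confirmed optimizer body:

	// Illustrative only: an AMSGrad-flavored step using the new _vt helpers.
	// grad plays the role of cumulative_hidden_layer_w_grad from compute_gradients.
	Vector<Ref<MLPPMatrix>> amsgrad_style_step(MLPPLinAlg &alg,
			const Vector<Ref<MLPPMatrix>> &grad,
			Vector<Ref<MLPPMatrix>> &v,       // running second moment, one matrix per layer
			Vector<Ref<MLPPMatrix>> &v_hat) { // element-wise running max of v
		v = alg.addition_vt(v, alg.exponentiate_vt(grad, 2)); // v += g^2 (decay omitted for brevity)
		v_hat = alg.max_vt(v_hat, v);                         // AMSGrad: keep the element-wise max
		return alg.element_wise_division_vt(grad, alg.sqrt_vt(v_hat)); // g / sqrt(v_hat)
	}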
real_t MLPPLinAlg::norm_2(std::vector<std::vector<std::vector<real_t>>> A) {
	real_t sum = 0;
	for (uint32_t i = 0; i < A.size(); i++) {
@@ -111,6 +111,8 @@ public:
	std::vector<std::vector<real_t>> rotate(std::vector<std::vector<real_t>> A, real_t theta, int axis = -1);

	std::vector<std::vector<real_t>> max(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
	Ref<MLPPMatrix> max_nm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);

	real_t max(std::vector<std::vector<real_t>> A);
	real_t min(std::vector<std::vector<real_t>> A);
@@ -305,11 +307,16 @@ public:
	// TENSOR FUNCTIONS
	std::vector<std::vector<std::vector<real_t>>> addition(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);

	Vector<Ref<MLPPMatrix>> addition_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);

	std::vector<std::vector<std::vector<real_t>>> elementWiseDivision(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
	Vector<Ref<MLPPMatrix>> element_wise_division_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);

	std::vector<std::vector<std::vector<real_t>>> sqrt(std::vector<std::vector<std::vector<real_t>>> A);
	Vector<Ref<MLPPMatrix>> sqrt_vt(const Vector<Ref<MLPPMatrix>> &A);

	std::vector<std::vector<std::vector<real_t>>> exponentiate(std::vector<std::vector<std::vector<real_t>>> A, real_t p);
	Vector<Ref<MLPPMatrix>> exponentiate_vt(const Vector<Ref<MLPPMatrix>> &A, real_t p);

	std::vector<std::vector<real_t>> tensor_vec_mult(std::vector<std::vector<std::vector<real_t>>> A, std::vector<real_t> b);
@@ -325,11 +332,15 @@ public:
	std::vector<std::vector<std::vector<real_t>>> resize(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);

	Vector<Ref<MLPPMatrix>> resize_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);

	std::vector<std::vector<std::vector<real_t>>> hadamard_product(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);

	std::vector<std::vector<std::vector<real_t>>> max(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
	Vector<Ref<MLPPMatrix>> max_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);

	std::vector<std::vector<std::vector<real_t>>> abs(std::vector<std::vector<std::vector<real_t>>> A);
	Vector<Ref<MLPPMatrix>> abs_vt(const Vector<Ref<MLPPMatrix>> &A);

	real_t norm_2(std::vector<std::vector<std::vector<real_t>>> A);
@@ -72,18 +72,18 @@ void MLPPOutputLayer::set_a(const Ref<MLPPVector> &val) {
	_initialized = false;
}

Ref<MLPPVector> MLPPOutputLayer::get_z_test() {
real_t MLPPOutputLayer::get_z_test() {
	return _z_test;
}
void MLPPOutputLayer::set_z_test(const Ref<MLPPVector> &val) {
void MLPPOutputLayer::set_z_test(const real_t val) {
	_z_test = val;
	_initialized = false;
}

Ref<MLPPVector> MLPPOutputLayer::get_a_test() {
real_t MLPPOutputLayer::get_a_test() {
	return _a_test;
}
void MLPPOutputLayer::set_a_test(const Ref<MLPPVector> &val) {
void MLPPOutputLayer::set_a_test(const real_t val) {
	_a_test = val;
	_initialized = false;
}
@@ -166,12 +166,13 @@ void MLPPOutputLayer::test(const Ref<MLPPVector> &x) {
	MLPPActivation avn;

	_z_test = alg.dotv(_weights, x) + _bias;
	_a_test = avn.run_activation_norm_vector(_activation, _z_test);
	_a_test = avn.run_activation_norm_real(_activation, _z_test);
}
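The test-time members shrink from Ref<MLPPVector> to real_t because test() evaluates a single sample: dotv(_weights, x) yields one scalar, so the pre-activation and activation are one scalar each. A minimal standalone sketch of that pass, assuming a sigmoid output (the real layer dispatches on _activation via run_activation_norm_real):

	#include <cmath>
	#include <vector>

	// Sketch only: scalar test-time pass for one sample. The sigmoid is an
	// assumption; the layer's actual activation depends on _activation.
	double output_layer_test(const std::vector<double> &weights, const std::vector<double> &x, double bias) {
		double z = bias; // _z_test
		for (size_t i = 0; i < weights.size(); i++) {
			z += weights[i] * x[i]; // alg.dotv(_weights, x)
		}
		return 1.0 / (1.0 + std::exp(-z)); // _a_test
	}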
MLPPOutputLayer::MLPPOutputLayer(int p_n_hidden, MLPPActivation::ActivationFunction p_activation, Ref<MLPPMatrix> p_input, MLPPUtilities::WeightDistributionType p_weight_init, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
MLPPOutputLayer::MLPPOutputLayer(int p_n_hidden, MLPPActivation::ActivationFunction p_activation, MLPPCost::CostTypes p_cost, Ref<MLPPMatrix> p_input, MLPPUtilities::WeightDistributionType p_weight_init, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
	_n_hidden = p_n_hidden;
	_activation = p_activation;
	_cost = p_cost;

	_input = p_input;
@@ -185,8 +186,8 @@ MLPPOutputLayer::MLPPOutputLayer(int p_n_hidden, MLPPActivation::ActivationFunct
	_z.instance();
	_a.instance();

	_z_test.instance();
	_a_test.instance();
	_z_test = 0;
	_a_test = 0;

	_delta.instance();
@@ -217,8 +218,8 @@ MLPPOutputLayer::MLPPOutputLayer() {
	_z.instance();
	_a.instance();

	_z_test.instance();
	_a_test.instance();
	_z_test = 0;
	_a_test = 0;

	_delta.instance();
@@ -265,11 +266,11 @@ void MLPPOutputLayer::_bind_methods() {
	ClassDB::bind_method(D_METHOD("get_z_test"), &MLPPOutputLayer::get_z_test);
	ClassDB::bind_method(D_METHOD("set_z_test", "val"), &MLPPOutputLayer::set_z_test);
	ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "z_test", PROPERTY_HINT_RESOURCE_TYPE, "MLPPVector"), "set_z_test", "get_z_test");
	ADD_PROPERTY(PropertyInfo(Variant::REAL, "z_test"), "set_z_test", "get_z_test");

	ClassDB::bind_method(D_METHOD("get_a_test"), &MLPPOutputLayer::get_a_test);
	ClassDB::bind_method(D_METHOD("set_a_test", "val"), &MLPPOutputLayer::set_a_test);
	ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "a_test", PROPERTY_HINT_RESOURCE_TYPE, "MLPPVector"), "set_a_test", "get_a_test");
	ADD_PROPERTY(PropertyInfo(Variant::REAL, "a_test"), "set_a_test", "get_a_test");

	ClassDB::bind_method(D_METHOD("get_delta"), &MLPPOutputLayer::get_delta);
	ClassDB::bind_method(D_METHOD("set_delta", "val"), &MLPPOutputLayer::set_delta);
@@ -49,11 +49,11 @@ public:
	Ref<MLPPVector> get_a();
	void set_a(const Ref<MLPPVector> &val);

	Ref<MLPPVector> get_z_test();
	void set_z_test(const Ref<MLPPVector> &val);
	real_t get_z_test();
	void set_z_test(const real_t val);

	Ref<MLPPVector> get_a_test();
	void set_a_test(const Ref<MLPPVector> &val);
	real_t get_a_test();
	void set_a_test(const real_t val);

	Ref<MLPPVector> get_delta();
	void set_delta(const Ref<MLPPVector> &val);
@@ -76,7 +76,7 @@ public:
	void forward_pass();
	void test(const Ref<MLPPVector> &x);

	MLPPOutputLayer(int p_n_hidden, MLPPActivation::ActivationFunction p_activation, Ref<MLPPMatrix> p_input, MLPPUtilities::WeightDistributionType p_weight_init, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha);
	MLPPOutputLayer(int p_n_hidden, MLPPActivation::ActivationFunction p_activation, MLPPCost::CostTypes p_cost, Ref<MLPPMatrix> p_input, MLPPUtilities::WeightDistributionType p_weight_init, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha);

	MLPPOutputLayer();
	~MLPPOutputLayer();
@@ -96,8 +96,8 @@ protected:
	Ref<MLPPVector> _z;
	Ref<MLPPVector> _a;

	Ref<MLPPVector> _z_test;
	Ref<MLPPVector> _a_test;
	real_t _z_test;
	real_t _a_test;

	Ref<MLPPVector> _delta;
@@ -626,18 +626,26 @@ void MLPPTests::test_dynamically_sized_ann(bool ui) {
	alg.printVector(ann_old.modelSetTest(alg.transpose(inputSet)));
	std::cout << "ACCURACY: " << 100 * ann_old.score() << "%" << std::endl;

	MLPPANN ann(alg.transpose(inputSet), outputSet);
	ann.add_layer(2, "Cosh");
	ann.add_output_layer("Sigmoid", "LogLoss");
	Ref<MLPPMatrix> input_set;
	input_set.instance();
	input_set->set_from_std_vectors(inputSet);

	Ref<MLPPVector> output_set;
	output_set.instance();
	output_set->set_from_std_vector(outputSet);

	MLPPANN ann(alg.transposem(input_set), output_set);
	ann.add_layer(2, MLPPActivation::ACTIVATION_FUNCTION_COSH);
	ann.add_output_layer(MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, MLPPCost::COST_TYPE_LOGISTIC_LOSS);

	ann.amsgrad(0.1, 10000, 1, 0.9, 0.999, 0.000001, ui);
	ann.adadelta(1, 1000, 2, 0.9, 0.000001, ui);
	ann.momentum(0.1, 8000, 2, 0.9, true, ui);

	ann.set_learning_rate_scheduler_drop("Step", 0.5, 1000);
	ann.set_learning_rate_scheduler_drop(MLPPANN::SCHEDULER_TYPE_STEP, 0.5, 1000);
	ann.gradient_descent(0.01, 30000);
	alg.printVector(ann.model_set_test(alg.transpose(inputSet)));
	std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
	PLOG_MSG(ann.model_set_test(alg.transposem(input_set))->to_string());
	PLOG_MSG("ACCURACY: " + String::num(100 * ann.score()) + "%");
}
void MLPPTests::test_wgan_old(bool ui) {
	//MLPPStat stat;
@@ -705,15 +713,23 @@ void MLPPTests::test_ann(bool ui) {
	alg.printVector(predictions_old); // Testing out the model's preds for train set.
	std::cout << "ACCURACY: " << 100 * ann_old.score() << "%" << std::endl; // Accuracy.

	MLPPANN ann(inputSet, outputSet);
	ann.add_layer(5, "Sigmoid");
	ann.add_layer(8, "Sigmoid"); // Add more layers as needed.
	ann.add_output_layer("Sigmoid", "LogLoss");
	Ref<MLPPMatrix> input_set;
	input_set.instance();
	input_set->set_from_std_vectors(inputSet);

	Ref<MLPPVector> output_set;
	output_set.instance();
	output_set->set_from_std_vector(outputSet);

	MLPPANN ann(input_set, output_set);
	ann.add_layer(5, MLPPActivation::ACTIVATION_FUNCTION_SIGMOID);
	ann.add_layer(8, MLPPActivation::ACTIVATION_FUNCTION_SIGMOID); // Add more layers as needed.
	ann.add_output_layer(MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, MLPPCost::COST_TYPE_LOGISTIC_LOSS);
	ann.gradient_descent(1, 20000, ui);

	std::vector<real_t> predictions = ann.model_set_test(inputSet);
	alg.printVector(predictions); // Testing out the model's preds for train set.
	std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl; // Accuracy.
	Ref<MLPPVector> predictions = ann.model_set_test(input_set);
	PLOG_MSG(predictions->to_string()); // Testing out the model's preds for train set.
	PLOG_MSG("ACCURACY: " + String::num(100 * ann.score()) + "%"); // Accuracy.
}
void MLPPTests::test_dynamically_sized_mann(bool ui) {
	MLPPLinAlg alg;