diff --git a/mlpp/hidden_layer/hidden_layer.cpp b/mlpp/hidden_layer/hidden_layer.cpp
index 119abdb..a65b501 100644
--- a/mlpp/hidden_layer/hidden_layer.cpp
+++ b/mlpp/hidden_layer/hidden_layer.cpp
@@ -16,6 +16,7 @@ int MLPPHiddenLayer::get_n_hidden() const {
 }
 void MLPPHiddenLayer::set_n_hidden(const int val) {
 	n_hidden = val;
+	_initialized = false;
 }
 
 MLPPActivation::ActivationFunction MLPPHiddenLayer::get_activation() const {
@@ -23,6 +24,7 @@ MLPPActivation::ActivationFunction MLPPHiddenLayer::get_activation() const {
 }
 void MLPPHiddenLayer::set_activation(const MLPPActivation::ActivationFunction val) {
 	activation = val;
+	_initialized = false;
 }
 
 Ref<MLPPMatrix> MLPPHiddenLayer::get_input() {
@@ -30,6 +32,7 @@ Ref<MLPPMatrix> MLPPHiddenLayer::get_input() {
 }
 void MLPPHiddenLayer::set_input(const Ref<MLPPMatrix> &val) {
 	input = val;
+	_initialized = false;
 }
 
 Ref<MLPPMatrix> MLPPHiddenLayer::get_weights() {
@@ -37,6 +40,7 @@ Ref<MLPPMatrix> MLPPHiddenLayer::get_weights() {
 }
 void MLPPHiddenLayer::set_weights(const Ref<MLPPMatrix> &val) {
 	weights = val;
+	_initialized = false;
 }
 
 Ref<MLPPVector> MLPPHiddenLayer::MLPPHiddenLayer::get_bias() {
@@ -44,6 +48,7 @@ Ref<MLPPVector> MLPPHiddenLayer::MLPPHiddenLayer::get_bias() {
 }
 void MLPPHiddenLayer::set_bias(const Ref<MLPPVector> &val) {
 	bias = val;
+	_initialized = false;
 }
 
 Ref<MLPPMatrix> MLPPHiddenLayer::get_z() {
@@ -51,6 +56,7 @@ Ref<MLPPMatrix> MLPPHiddenLayer::get_z() {
 }
 void MLPPHiddenLayer::set_z(const Ref<MLPPMatrix> &val) {
 	z = val;
+	_initialized = false;
 }
 
 Ref<MLPPMatrix> MLPPHiddenLayer::get_a() {
@@ -58,6 +64,7 @@ Ref<MLPPMatrix> MLPPHiddenLayer::get_a() {
 }
 void MLPPHiddenLayer::set_a(const Ref<MLPPMatrix> &val) {
 	a = val;
+	_initialized = false;
 }
 
 Ref<MLPPVector> MLPPHiddenLayer::get_z_test() {
@@ -65,6 +72,7 @@ Ref<MLPPVector> MLPPHiddenLayer::get_z_test() {
 }
 void MLPPHiddenLayer::set_z_test(const Ref<MLPPVector> &val) {
 	z_test = val;
+	_initialized = false;
 }
 
 Ref<MLPPVector> MLPPHiddenLayer::get_a_test() {
@@ -72,6 +80,7 @@ Ref<MLPPVector> MLPPHiddenLayer::get_a_test() {
 }
 void MLPPHiddenLayer::set_a_test(const Ref<MLPPVector> &val) {
 	a_test = val;
+	_initialized = false;
 }
 
 Ref<MLPPMatrix> MLPPHiddenLayer::get_delta() {
@@ -79,6 +88,7 @@ Ref<MLPPMatrix> MLPPHiddenLayer::get_delta() {
 }
 void MLPPHiddenLayer::set_delta(const Ref<MLPPMatrix> &val) {
 	delta = val;
+	_initialized = false;
 }
 
 MLPPReg::RegularizationType MLPPHiddenLayer::get_reg() const {
@@ -86,6 +96,7 @@ MLPPReg::RegularizationType MLPPHiddenLayer::get_reg() const {
 }
 void MLPPHiddenLayer::set_reg(const MLPPReg::RegularizationType val) {
 	reg = val;
+	_initialized = false;
 }
 
 real_t MLPPHiddenLayer::get_lambda() const {
@@ -93,6 +104,7 @@ real_t MLPPHiddenLayer::get_lambda() const {
 }
 void MLPPHiddenLayer::set_lambda(const real_t val) {
 	lambda = val;
+	_initialized = false;
 }
 
 real_t MLPPHiddenLayer::get_alpha() const {
@@ -100,6 +112,7 @@ real_t MLPPHiddenLayer::get_alpha() const {
 }
 void MLPPHiddenLayer::set_alpha(const real_t val) {
 	alpha = val;
+	_initialized = false;
 }
 
 MLPPUtilities::WeightDistributionType MLPPHiddenLayer::get_weight_init() const {
@@ -107,9 +120,33 @@ MLPPUtilities::WeightDistributionType MLPPHiddenLayer::get_weight_init() const {
 }
 void MLPPHiddenLayer::set_weight_init(const MLPPUtilities::WeightDistributionType val) {
 	weight_init = val;
+	_initialized = false;
+}
+
+bool MLPPHiddenLayer::is_initialized() {
+	return _initialized;
+}
+void MLPPHiddenLayer::initialize() {
+	if (_initialized) {
+		return;
+	}
+
+	weights->resize(Size2i(n_hidden, input->size().x));
+	bias->resize(n_hidden);
+
+	MLPPUtilities utils;
+
+	utils.weight_initializationm(weights, weight_init);
+	utils.bias_initializationv(bias);
+
+	_initialized = true;
 }
 
 void MLPPHiddenLayer::forward_pass() {
+	if (!_initialized) {
+		initialize();
+	}
+
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 
@@ -118,6 +155,10 @@ void MLPPHiddenLayer::forward_pass() {
 }
 
 void MLPPHiddenLayer::test(const Ref<MLPPVector> &x) {
+	if (!_initialized) {
+		initialize();
+	}
+
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 
@@ -149,13 +190,15 @@ MLPPHiddenLayer::MLPPHiddenLayer(int p_n_hidden, MLPPActivation::ActivationFunct
 	weights.instance();
 	bias.instance();
 
-	weights->resize(Size2i(input->size().x, n_hidden));
+	weights->resize(Size2i(n_hidden, input->size().x));
 	bias->resize(n_hidden);
 
 	MLPPUtilities utils;
 
 	utils.weight_initializationm(weights, weight_init);
 	utils.bias_initializationv(bias);
+
+	_initialized = true;
 }
 
 MLPPHiddenLayer::MLPPHiddenLayer() {
@@ -179,6 +222,8 @@
 	weights.instance();
 	bias.instance();
+
+	_initialized = false;
 }
 
 MLPPHiddenLayer::~MLPPHiddenLayer() {
 }
@@ -240,6 +285,9 @@
 	ClassDB::bind_method(D_METHOD("set_weight_init", "val"), &MLPPHiddenLayer::set_weight_init);
 	ADD_PROPERTY(PropertyInfo(Variant::INT, "set_weight_init"), "set_weight_init", "get_weight_init");
 
+	ClassDB::bind_method(D_METHOD("is_initialized"), &MLPPHiddenLayer::is_initialized);
+	ClassDB::bind_method(D_METHOD("initialize"), &MLPPHiddenLayer::initialize);
+
 	ClassDB::bind_method(D_METHOD("forward_pass"), &MLPPHiddenLayer::forward_pass);
 	ClassDB::bind_method(D_METHOD("test", "x"), &MLPPHiddenLayer::test);
 }
@@ -338,6 +386,7 @@ MLPPOldHiddenLayer::MLPPOldHiddenLayer(int p_n_hidden, std::string p_activation,
 void MLPPOldHiddenLayer::forwardPass() {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
+
 	z = alg.mat_vec_add(alg.matmult(input, weights), bias);
 	a = (avn.*activation_map[activation])(z, false);
 }
diff --git a/mlpp/hidden_layer/hidden_layer.h b/mlpp/hidden_layer/hidden_layer.h
index 055ef3e..e50c373 100644
--- a/mlpp/hidden_layer/hidden_layer.h
+++ b/mlpp/hidden_layer/hidden_layer.h
@@ -70,6 +70,9 @@ public:
 	MLPPUtilities::WeightDistributionType get_weight_init() const;
 	void set_weight_init(const MLPPUtilities::WeightDistributionType val);
 
+	bool is_initialized();
+	void initialize();
+
 	void forward_pass();
 	void test(const Ref<MLPPVector> &x);
 
@@ -103,6 +106,8 @@ protected:
 	real_t alpha; /* This is the controlling param for Elastic Net*/
 
 	MLPPUtilities::WeightDistributionType weight_init;
+
+	bool _initialized;
 };
 
 class MLPPOldHiddenLayer {
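Note (editorial, not part of the patch): the hidden-layer hunks above move weight/bias allocation out of the parametrized constructor into a lazy initialize() guarded by _initialized; every setter invalidates the layer, and forward_pass()/test() re-initialize on demand. A minimal usage sketch follows, assuming a Godot-style build where MLPPHiddenLayer and MLPPMatrix are registered Reference types; the dimensions are hypothetical and only methods bound in this diff are used.

	// Sketch only: exercises the lazy-initialization path added in this patch.
	Ref<MLPPMatrix> data;
	data.instance();
	data->resize(Size2i(4, 100)); // hypothetical shape; axis convention as in input->size().x above

	Ref<MLPPHiddenLayer> layer;
	layer.instance(); // default constructor leaves _initialized = false

	layer->set_input(data);
	layer->set_n_hidden(8);

	layer->forward_pass(); // triggers initialize(): weights/bias are resized and drawn here
	// layer->is_initialized() is now true.

	layer->set_n_hidden(16); // any setter clears _initialized
	layer->forward_pass();   // initialize() runs again with the new layer width

One consequence worth noting: set_weights() and set_bias() also clear _initialized, so hand-assigned parameters are redrawn by the next forward_pass(), since initialize() re-randomizes them.
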
diff --git a/mlpp/output_layer/output_layer.cpp b/mlpp/output_layer/output_layer.cpp
index 147bb5a..3a9155a 100644
--- a/mlpp/output_layer/output_layer.cpp
+++ b/mlpp/output_layer/output_layer.cpp
@@ -16,6 +16,7 @@ int MLPPOutputLayer::get_n_hidden() {
 }
 void MLPPOutputLayer::set_n_hidden(const int val) {
 	n_hidden = val;
+	_initialized = false;
 }
 
 MLPPActivation::ActivationFunction MLPPOutputLayer::get_activation() {
@@ -23,6 +24,7 @@ MLPPActivation::ActivationFunction MLPPOutputLayer::get_activation() {
 }
 void MLPPOutputLayer::set_activation(const MLPPActivation::ActivationFunction val) {
 	activation = val;
+	_initialized = false;
 }
 
 MLPPCost::CostTypes MLPPOutputLayer::get_cost() {
@@ -30,6 +32,7 @@ MLPPCost::CostTypes MLPPOutputLayer::get_cost() {
 }
 void MLPPOutputLayer::set_cost(const MLPPCost::CostTypes val) {
 	cost = val;
+	_initialized = false;
 }
 
 Ref<MLPPMatrix> MLPPOutputLayer::get_input() {
@@ -37,6 +40,7 @@ Ref<MLPPMatrix> MLPPOutputLayer::get_input() {
 }
 void MLPPOutputLayer::set_input(const Ref<MLPPMatrix> &val) {
 	input = val;
+	_initialized = false;
 }
 
 Ref<MLPPVector> MLPPOutputLayer::get_weights() {
@@ -44,6 +48,7 @@ Ref<MLPPVector> MLPPOutputLayer::get_weights() {
 }
 void MLPPOutputLayer::set_weights(const Ref<MLPPVector> &val) {
 	weights = val;
+	_initialized = false;
 }
 
 real_t MLPPOutputLayer::MLPPOutputLayer::get_bias() {
@@ -51,6 +56,7 @@ real_t MLPPOutputLayer::MLPPOutputLayer::get_bias() {
 }
 void MLPPOutputLayer::set_bias(const real_t val) {
 	bias = val;
+	_initialized = false;
 }
 
 Ref<MLPPVector> MLPPOutputLayer::get_z() {
@@ -58,6 +64,7 @@ Ref<MLPPVector> MLPPOutputLayer::get_z() {
 }
 void MLPPOutputLayer::set_z(const Ref<MLPPVector> &val) {
 	z = val;
+	_initialized = false;
 }
 
 Ref<MLPPVector> MLPPOutputLayer::get_a() {
@@ -65,6 +72,7 @@ Ref<MLPPVector> MLPPOutputLayer::get_a() {
 }
 void MLPPOutputLayer::set_a(const Ref<MLPPVector> &val) {
 	a = val;
+	_initialized = false;
 }
 
 Ref<MLPPVector> MLPPOutputLayer::get_z_test() {
@@ -72,6 +80,7 @@ Ref<MLPPVector> MLPPOutputLayer::get_z_test() {
 }
 void MLPPOutputLayer::set_z_test(const Ref<MLPPVector> &val) {
 	z_test = val;
+	_initialized = false;
 }
 
 Ref<MLPPVector> MLPPOutputLayer::get_a_test() {
@@ -79,6 +88,7 @@ Ref<MLPPVector> MLPPOutputLayer::get_a_test() {
 }
 void MLPPOutputLayer::set_a_test(const Ref<MLPPVector> &val) {
 	a_test = val;
+	_initialized = false;
 }
 
 Ref<MLPPVector> MLPPOutputLayer::get_delta() {
@@ -86,6 +96,7 @@ Ref<MLPPVector> MLPPOutputLayer::get_delta() {
 }
 void MLPPOutputLayer::set_delta(const Ref<MLPPVector> &val) {
 	delta = val;
+	_initialized = false;
 }
 
 MLPPReg::RegularizationType MLPPOutputLayer::get_reg() {
@@ -100,6 +111,7 @@ real_t MLPPOutputLayer::get_lambda() {
 }
 void MLPPOutputLayer::set_lambda(const real_t val) {
 	lambda = val;
+	_initialized = false;
 }
 
 real_t MLPPOutputLayer::get_alpha() {
@@ -107,6 +119,7 @@ real_t MLPPOutputLayer::get_alpha() {
 }
 void MLPPOutputLayer::set_alpha(const real_t val) {
 	alpha = val;
+	_initialized = false;
 }
 
 MLPPUtilities::WeightDistributionType MLPPOutputLayer::get_weight_init() {
@@ -114,9 +127,32 @@ MLPPUtilities::WeightDistributionType MLPPOutputLayer::get_weight_init() {
 }
 void MLPPOutputLayer::set_weight_init(const MLPPUtilities::WeightDistributionType val) {
 	weight_init = val;
+	_initialized = false;
+}
+
+bool MLPPOutputLayer::is_initialized() {
+	return _initialized;
+}
+void MLPPOutputLayer::initialize() {
+	if (_initialized) {
+		return;
+	}
+
+	weights->resize(n_hidden);
+
+	MLPPUtilities utils;
+
+	utils.weight_initializationv(weights, weight_init);
+	bias = utils.bias_initializationr();
+
+	_initialized = true;
 }
 
 void MLPPOutputLayer::forward_pass() {
+	if (!_initialized) {
+		initialize();
+	}
+
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 
@@ -125,6 +161,10 @@ void MLPPOutputLayer::forward_pass() {
 }
 
 void MLPPOutputLayer::test(const Ref<MLPPVector> &x) {
+	if (!_initialized) {
+		initialize();
+	}
+
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 
@@ -162,6 +202,8 @@ MLPPOutputLayer::MLPPOutputLayer(int p_n_hidden, MLPPActivation::ActivationFunct
 
 	utils.weight_initializationv(weights, weight_init);
 	bias = utils.bias_initializationr();
+
+	_initialized = true;
 }
 
 MLPPOutputLayer::MLPPOutputLayer() {
@@ -185,6 +227,8 @@
 	weights.instance();
 	bias = 0;
+
+	_initialized = false;
 }
 
 MLPPOutputLayer::~MLPPOutputLayer() {
 }
@@ -250,6 +294,9 @@
 	ClassDB::bind_method(D_METHOD("set_weight_init", "val"), &MLPPOutputLayer::set_weight_init);
 	ADD_PROPERTY(PropertyInfo(Variant::INT, "set_weight_init"), "set_weight_init", "get_weight_init");
 
+	ClassDB::bind_method(D_METHOD("is_initialized"), &MLPPOutputLayer::is_initialized);
+	ClassDB::bind_method(D_METHOD("initialize"), &MLPPOutputLayer::initialize);
+
 	ClassDB::bind_method(D_METHOD("forward_pass"), &MLPPOutputLayer::forward_pass);
 	ClassDB::bind_method(D_METHOD("test", "x"), &MLPPOutputLayer::test);
 }
diff --git a/mlpp/output_layer/output_layer.h b/mlpp/output_layer/output_layer.h
index 373c63a..1208dba 100644
--- a/mlpp/output_layer/output_layer.h
+++ b/mlpp/output_layer/output_layer.h
@@ -74,6 +74,9 @@ public:
 	MLPPUtilities::WeightDistributionType get_weight_init();
 	void set_weight_init(const MLPPUtilities::WeightDistributionType val);
 
+	bool is_initialized();
+	void initialize();
+
 	void forward_pass();
 	void test(const Ref<MLPPVector> &x);
 
@@ -108,6 +111,8 @@ protected:
 	real_t alpha; /* This is the controlling param for Elastic Net*/
 
 	MLPPUtilities::WeightDistributionType weight_init;
+
+	bool _initialized;
 };
 
 class MLPPOldOutputLayer {
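
Note (editorial, not part of the patch): MLPPOutputLayer receives the same lazy-initialization treatment, with one asymmetry: the hunks jump from set_delta (@@ -86,6 +96,7 @@) to set_lambda (@@ -100,6 +111,7 @@), so MLPPOutputLayer::set_reg never clears _initialized, whereas MLPPHiddenLayer::set_reg does. A short sketch of the output-layer flow, under the same assumptions as the hidden-layer sketch above; the MLPPReg enum value shown is a guess.

	// Sketch only: manual initialization plus the set_reg asymmetry.
	Ref<MLPPOutputLayer> out;
	out.instance(); // _initialized = false

	out->set_input(data); // reusing the hypothetical matrix from the earlier sketch
	out->set_n_hidden(8);
	out->initialize(); // resizes weights to n_hidden and draws bias; _initialized = true

	out->set_reg(MLPPReg::REGULARIZATION_TYPE_RIDGE); // not patched: does NOT clear _initialized
	out->set_lambda(0.5); // patched: clears _initialized

	out->forward_pass(); // re-runs initialize() lazily, then computes z and a

Since reg, lambda, and alpha only affect regularization and not parameter shapes, having those setters reset _initialized (and thereby re-randomize the weights on the next pass) may itself be worth a second look in review.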