mirror of https://github.com/Relintai/pmlpp.git
synced 2024-12-21 14:56:47 +01:00
Fix warnings found by clang.
This commit is contained in:
parent 6970af9363
commit 624fd34433
@@ -197,10 +197,10 @@ void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
 _weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);

 // Calculating the bias gradient
-real_t sum = 0;
-for (int j = 0; j < current_output_batch->size(); j++) {
-    sum += (y_hat->get_element(j) - current_output_batch->get_element(j));
-}
+//real_t sum = 0;
+//for (int j = 0; j < current_output_batch->size(); j++) {
+//    sum += (y_hat->get_element(j) - current_output_batch->get_element(j));
+//}

 //real_t b_gradient = sum / output_mini_batches[i].size();
 y_hat = evaluatem(current_input_batch);
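Note: the only consumer of `sum` (the `b_gradient` line) was already commented out, so the accumulator was written but never read afterwards; that is presumably the clang diagnostic (something like -Wunused-but-set-variable) this hunk silences by commenting out the whole accumulation. A minimal standalone sketch of the flagged pattern, with illustrative names rather than the module's API:

    #include <cstddef>
    #include <vector>

    void demo_bias_gradient(const std::vector<double> &y_hat, const std::vector<double> &y) {
        double sum = 0; // written in the loop but never read afterwards -> clang warns
        for (std::size_t j = 0; j < y.size(); j++) {
            sum += y_hat[j] - y[j];
        }
        // double b_gradient = sum / y.size(); // the only consumer is commented out,
        // which mirrors the situation in the hunk above; the commit's fix is to
        // comment out the accumulation as well.
    }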
@@ -288,7 +288,7 @@ Ref<MLPPVector> MLPPExpReg::evaluatem(const Ref<MLPPMatrix> &X) {
 y_hat->resize(X->size().y);

 for (int i = 0; i < X->size().y; i++) {
-    real_t y;
+    real_t y = 0;

     for (int j = 0; j < X->size().x; j++) {
         y += _initial->get_element(j) * Math::pow(_weights->get_element(j), X->get_element(i, j));
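Note: `y` is only ever updated with `+=` inside the inner loop, so leaving it uninitialized would read an indeterminate value on the first iteration; initializing it to 0 is the fix (the kind of thing clang reports via -Wuninitialized or its static analyzer). A hedged sketch of the accumulation pattern with plain std types, not the module's Ref/MLPPMatrix API:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    std::vector<double> evaluate_rows(const std::vector<std::vector<double>> &X,
            const std::vector<double> &initial,
            const std::vector<double> &weights) {
        std::vector<double> y_hat(X.size());
        for (std::size_t i = 0; i < X.size(); i++) {
            double y = 0; // must start at zero; "+=" below would otherwise read garbage
            for (std::size_t j = 0; j < X[i].size(); j++) { // assumes initial/weights cover every column
                y += initial[j] * std::pow(weights[j], X[i][j]);
            }
            y_hat[i] = y; // any bias term would be added here, outside this sketch's scope
        }
        return y_hat;
    }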
@@ -182,10 +182,10 @@ void MLPPExpRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_siz
 weights = regularization.regWeights(weights, lambda, alpha, reg);

 // Calculating the bias gradient
-real_t sum = 0;
-for (uint32_t j = 0; j < outputMiniBatches[i].size(); j++) {
-    sum += (y_hat[j] - outputMiniBatches[i][j]);
-}
+//real_t sum = 0;
+//for (uint32_t j = 0; j < outputMiniBatches[i].size(); j++) {
+//    sum += (y_hat[j] - outputMiniBatches[i][j]);
+//}

 //real_t b_gradient = sum / outputMiniBatches[i].size();
 y_hat = Evaluate(inputMiniBatches[i]);
@@ -292,6 +292,8 @@ public:
 for (int i = 0; i < _size.x; ++i) {
     row_ptr[i] = _data[ind_start + i];
 }

 return ret;
 }

 _FORCE_INLINE_ PoolRealArray get_row_pool_vector(int p_index_y) {
@@ -293,6 +293,8 @@ public:
 for (int i = 0; i < _size.x; ++i) {
     row_ptr[i] = _data[ind_start + i];
 }

 return ret;
 }

 _FORCE_INLINE_ PoolRealArray get_row_pool_vector(int p_index_y) {
@@ -644,6 +646,7 @@ public:
 }
 }


 // TODO: These are temporary
 std::vector<real_t> to_flat_std_vector() const;
 void set_from_std_vectors(const std::vector<std::vector<real_t>> &p_from);
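Note on the `// TODO: These are temporary` pair above: `to_flat_std_vector()` and `set_from_std_vectors()` bridge the engine-side matrix storage and plain std::vector containers. The snippet below is a hypothetical illustration of what such conversions typically do for a row-major flat buffer; it is not the module's implementation, and `FlatMatrix` is an invented stand-in type:

    #include <cstddef>
    #include <vector>

    struct FlatMatrix {
        int rows = 0;
        int cols = 0;
        std::vector<double> data; // row-major, rows * cols elements

        std::vector<double> to_flat_std_vector() const {
            return data; // storage is already flat, so a copy is enough
        }

        void set_from_std_vectors(const std::vector<std::vector<double>> &from) {
            rows = static_cast<int>(from.size());
            cols = from.empty() ? 0 : static_cast<int>(from[0].size());
            data.clear();
            data.reserve(static_cast<std::size_t>(rows) * static_cast<std::size_t>(cols));
            for (const std::vector<double> &row : from) {
                data.insert(data.end(), row.begin(), row.end()); // assumes equal row lengths
            }
        }
    };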
@@ -76,7 +76,7 @@ protected:

 int _n;
 int _k;
-real_t _learning_rate;
+//real_t _learning_rate;

 // Regularization Params
 MLPPReg::RegularizationType _reg;
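Note: the `_learning_rate` member is commented out rather than deleted; presumably it was never read anywhere in the class, which clang reports as a dead member (e.g. -Wunused-private-field when the field is private). A generic reproduction with an invented class, not the module's header:

    class SketchRegressor {
    public:
        explicit SketchRegressor(int n, int k) : _n(n), _k(k) {}
        int n() const { return _n; }
        int k() const { return _k; }

    private:
        int _n;
        int _k;
        // double _learning_rate; // never read or written: clang would flag this
        //                        // member before the fix; commenting it out (or
        //                        // deleting it) silences the diagnostic
    };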
@@ -40,7 +40,7 @@ private:

 int n;
 int k;
-real_t learning_rate;
+//real_t learning_rate;

 // Regularization Params
 std::string reg;
@@ -77,7 +77,7 @@ real_t MLPPNumericalAnalysis::numDiff_3(real_t (*function)(std::vector<real_t>),
 // For third order derivative tensors.
 // NOTE: Approximations do not appear to be accurate for sinusodial functions...
 // Should revisit this later.
-real_t eps = INT_MAX;
+real_t eps = 1e-5;

 std::vector<real_t> x_ppp = x;
 x_ppp[axis1] += eps;
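Note: `INT_MAX` as a finite-difference step is wrong twice over: if real_t is a 32-bit float, the initialization trips clang's implicit int-to-float conversion warning (2147483647 is not exactly representable as a float), and a huge eps defeats the approximation, which only converges as the step shrinks. A small standalone sketch of the idea with an invented test function, not the module's numDiff_3:

    #include <cmath>
    #include <cstdio>

    static double f(double x) { return std::sin(x); } // invented test function

    int main() {
        const double x = 1.0;
        const double eps = 1e-5; // small step, matching the fixed line above
        // Central difference: f'(x) ~ (f(x + eps) - f(x - eps)) / (2 * eps).
        const double approx = (f(x + eps) - f(x - eps)) / (2.0 * eps);
        std::printf("approx %.8f vs exact %.8f\n", approx, std::cos(x));
        return 0;
    }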
@@ -77,7 +77,7 @@ real_t MLPPNumericalAnalysisOld::numDiff_3(real_t (*function)(std::vector<real_t
 // For third order derivative tensors.
 // NOTE: Approximations do not appear to be accurate for sinusodial functions...
 // Should revisit this later.
-real_t eps = INT_MAX;
+real_t eps = 1e-5;

 std::vector<real_t> x_ppp = x;
 x_ppp[axis1] += eps;