Fixed MLPPConvolutions::dy().

This commit is contained in:
Relintai 2023-12-29 19:22:56 +01:00
parent 2fb3c086e3
commit ad34de8409
3 changed files with 18 additions and 21 deletions

View File

@ -338,9 +338,9 @@ Ref<MLPPMatrix> MLPPConvolutions::dy(const Ref<MLPPMatrix> &input) {
for (int i = 0; i < input_size.y; i++) {
for (int j = 0; j < input_size.x; j++) {
if (j != 0 && j != input_size.y - 1) {
if (i != 0 && i != input_size.y - 1) {
deriv->element_set(i, j, input->element_get(i - 1, j) - input->element_get(i + 1, j));
} else if (j == 0) {
} else if (i == 0) {
deriv->element_set(i, j, -input->element_get(i + 1, j)); // 0 - E1 = Implicit zero-padding
} else {
deriv->element_set(i, j, input->element_get(i - 1, j)); // E0 - 0 = Implicit zero-padding

View File

@ -1477,19 +1477,23 @@ void MLPPTests::test_numerical_analysis() {
PLOG_MSG(conv.dx(A)->to_string());
/*
0 0 0 0
1 0 0 0
0 0 0 -1
0 0 0 0
[MLPPMatrix:
[ -0 -0 -0 -0 ]
[ 1 0 0 0 ]
[ 0 0 0 -1 ]
[ 0 0 0 0 ]
]
*/
PLOG_MSG("conv.dy(A)");
PLOG_MSG(conv.dy(A)->to_string());
/*
0 3.14159 0 0
1.5708 0 0 0
0 0 0 -1.5708
0 0 0 0
[MLPPMatrix:
[ -0 -3.141593 -0 -3.141593 ]
[ 1.570796 0 0 3.141593 ]
[ 0 0 0 -1.570796 ]
[ 0 0 0 3.141593 ]
]
*/
PLOG_MSG("conv.grad_orientation(A)");
PLOG_MSG(conv.grad_orientation(A)->to_string());

View File

@ -541,7 +541,6 @@ void MLPPTestsOld::test_numerical_analysis() {
// Checks for numerical analysis class.
MLPPNumericalAnalysisOld numAn;
/*
std::cout << numAn.quadraticApproximation(f_old, 0, 1) << std::endl;
std::cout << numAn.cubicApproximation(f_old, 0, 1.001) << std::endl;
@ -550,12 +549,10 @@ void MLPPTestsOld::test_numerical_analysis() {
std::cout << numAn.quadraticApproximation(f_mv_old, { 0, 0, 0 }, { 1, 1, 1 }) << std::endl;
std::cout << numAn.numDiff(&f_old, 1) << std::endl;
std::cout << numAn.newtonRaphsonMethod(&f_old, 1, 1000) << std::endl;
std::cout << numAn.invQuadraticInterpolation(&f_old, { 100, 2, 1.5 }, 10) << std::endl;
std::cout << numAn.numDiff(&f_mv_old, { 1, 1 }, 1) << std::endl; // Derivative w.r.t. x.
alg.printVector(numAn.jacobian(&f_mv_old, { 1, 1 }));
@ -567,8 +564,6 @@ void MLPPTestsOld::test_numerical_analysis() {
std::cout << numAn.numDiff_2(&f_mv_old, { 2, 2, 500 }, 2, 2) << std::endl;
std::cout << numAn.numDiff_3(&f_mv_old, { 2, 1000, 130 }, 0, 0, 0) << std::endl;
alg.printTensor(numAn.thirdOrderTensor(&f_mv_old, { 1, 1, 1 }));
std::cout << "Our Hessian." << std::endl;
alg.printMatrix(numAn.hessian(&f_mv_old, { 2, 2, 500 }));
@ -583,11 +578,10 @@ void MLPPTestsOld::test_numerical_analysis() {
alg.printMatrix(alg.tensor_vec_mult(tensor, { 1, 2 }));
std::cout << numAn.cubicApproximation(f_mv_old, { 0, 0, 0 }, { 1, 1, 1 }) << std::endl;
std::cout << numAn.eulerianMethod(f_prime_old, { 1, 1 }, 1.5, 0.000001) << std::endl;
std::cout << numAn.eulerianMethod(f_prime_2var_old, { 2, 3 }, 2.5, 0.00000001) << std::endl;
*/
std::vector<std::vector<real_t>> A = {
{ 1, 0, 0, 0 },
{ 0, 0, 0, 0 },
@ -595,9 +589,8 @@ void MLPPTestsOld::test_numerical_analysis() {
{ 0, 0, 0, 1 }
};
//alg.printMatrix(conv.dx(A));
//alg.printMatrix(conv.dy(A));
alg.printMatrix(conv.dx(A));
alg.printMatrix(conv.dy(A));
alg.printMatrix(conv.grad_orientation(A));