diff --git a/mlpp/convolutions/convolutions.cpp b/mlpp/convolutions/convolutions.cpp
index 59b2f75..7f2a2bf 100644
--- a/mlpp/convolutions/convolutions.cpp
+++ b/mlpp/convolutions/convolutions.cpp
@@ -4,376 +4,548 @@
 //  Created by Marc Melikyan on 4/6/21.
 //
 
-#include "../convolutions/convolutions.h"
+#include "convolutions.h"
 
 #include "../lin_alg/lin_alg.h"
 #include "../stat/stat.h"
-#include <iostream>
-#include <cmath>
+#include "core/math/math_funcs.h"
 
-/*
-std::vector<std::vector<real_t>> MLPPConvolutions::convolve_2d(std::vector<std::vector<real_t>> input, std::vector<std::vector<real_t>> filter, int S, int P) {
+#include <cmath>
+
+Ref<MLPPMatrix> MLPPConvolutions::convolve_2d(const Ref<MLPPMatrix> &p_input, const Ref<MLPPMatrix> &filter, const int S, const int P) {
 	MLPPLinAlg alg;
-	std::vector<std::vector<real_t>> feature_map;
-	uint32_t N = input.size();
-	uint32_t F = filter.size();
-	uint32_t map_size = (N - F + 2 * P) / S + 1; // This is computed as ⌊map_size⌋ by def- thanks C++!
+
+	Ref<MLPPMatrix> input = p_input;
+
+	Size2i input_size = input->size();
+	int N = input_size.y;
+	int F = filter->size().y;
+	int map_size = (N - F + 2 * P) / S + 1; // Integer division gives ⌊map_size⌋ by definition - thanks, C++!
 
 	if (P != 0) {
-		std::vector<std::vector<real_t>> padded_input;
-		padded_input.resize(N + 2 * P);
-		for (uint32_t i = 0; i < padded_input.size(); i++) {
-			padded_input[i].resize(N + 2 * P);
-		}
-		for (uint32_t i = 0; i < padded_input.size(); i++) {
-			for (uint32_t j = 0; j < padded_input[i].size(); j++) {
-				if (i - P < 0 || j - P < 0 || i - P > input.size() - 1 || j - P > input[0].size() - 1) {
-					padded_input[i][j] = 0;
+		Ref<MLPPMatrix> padded_input;
+		padded_input.instance();
+
+		Size2i pis = Size2i(N + 2 * P, N + 2 * P);
+
+		padded_input->resize(pis);
+
+		for (int i = 0; i < pis.y; i++) {
+			for (int j = 0; j < pis.x; j++) {
+				if (i - P < 0 || j - P < 0 || i - P > input_size.y - 1 || j - P > input_size.x - 1) {
+					padded_input->element_set(i, j, 0);
 				} else {
-					padded_input[i][j] = input[i - P][j - P];
+					padded_input->element_set(i, j, input->element_get(i - P, j - P));
 				}
 			}
 		}
-		input.resize(padded_input.size());
-		for (uint32_t i = 0; i < padded_input.size(); i++) {
-			input[i].resize(padded_input[i].size());
-		}
+
+		input = padded_input;
 	}
 
-	feature_map.resize(map_size);
-	for (uint32_t i = 0; i < map_size; i++) {
-		feature_map[i].resize(map_size);
-	}
+	Ref<MLPPMatrix> feature_map;
+	feature_map.instance();
+
+	feature_map->resize(Size2i(map_size, map_size));
+
+	Ref<MLPPVector> filter_flattened = filter->flatten();
+
+	Ref<MLPPVector> convolving_input;
+	convolving_input.instance();
+	convolving_input->resize(F * F);
+
+	for (int i = 0; i < map_size; i++) {
+		for (int j = 0; j < map_size; j++) {
+			int current_index = 0;
+
+			for (int k = 0; k < F; k++) {
+				for (int p = 0; p < F; p++) {
+					real_t val;
 
-	for (uint32_t i = 0; i < map_size; i++) {
-		for (uint32_t j = 0; j < map_size; j++) {
-			std::vector<real_t> convolving_input;
-			for (uint32_t k = 0; k < F; k++) {
-				for (uint32_t p = 0; p < F; p++) {
 					if (i == 0 && j == 0) {
-						convolving_input.push_back(input[i + k][j + p]);
+						val = input->element_get(i + k, j + p);
 					} else if (i == 0) {
-						convolving_input.push_back(input[i + k][j + (S - 1) + p]);
+						val = input->element_get(i + k, j + (S - 1) + p);
 					} else if (j == 0) {
-						convolving_input.push_back(input[i + (S - 1) + k][j + p]);
+						val = input->element_get(i + (S - 1) + k, j + p);
 					} else {
-						convolving_input.push_back(input[i + (S - 1) + k][j + (S - 1) + p]);
+						val = input->element_get(i + (S - 1) + k, j + (S - 1) + p);
 					}
+
+					convolving_input->element_set(current_index, val);
+					++current_index;
 				}
 			}
-			feature_map[i][j] = alg.dot(convolving_input, alg.flatten(filter));
+
+			feature_map->element_set(i, j, convolving_input->dot(filter_flattened));
 		}
 	}
+
 	return feature_map;
 }
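+
+// Sizing example (illustrative, not from the original sources): with an 8x8 input,
+// a 3x3 filter, S = 1 and P = 0, map_size = (8 - 3 + 0) / 1 + 1 = 6, so convolve_2d()
+// returns a 6x6 feature map; with P = 1 it is (8 - 3 + 2) / 1 + 1 = 8, a "same"-sized output.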
 
-std::vector<std::vector<std::vector<real_t>>> MLPPConvolutions::convolve_3d(std::vector<std::vector<std::vector<real_t>>> input, std::vector<std::vector<std::vector<real_t>>> filter, int S, int P) {
+Ref<MLPPTensor3> MLPPConvolutions::convolve_3d(const Ref<MLPPTensor3> &p_input, const Ref<MLPPTensor3> &filter, const int S, const int P) {
 	MLPPLinAlg alg;
-	std::vector<std::vector<std::vector<real_t>>> feature_map;
-	uint32_t N = input[0].size();
-	uint32_t F = filter[0].size();
-	uint32_t C = filter.size() / input.size();
-	uint32_t map_size = (N - F + 2 * P) / S + 1; // This is computed as ⌊map_size⌋ by def.
+
+	Ref<MLPPTensor3> input = p_input;
+
+	Size3i input_size = input->size();
+	Size3i filter_size = filter->size();
+
+	int N = input_size.y;
+	int F = filter_size.y;
+	int C = filter_size.z / input_size.z;
+	int map_size = (N - F + 2 * P) / S + 1; // This is computed as ⌊map_size⌋ by def.
 
 	if (P != 0) {
-		for (uint32_t c = 0; c < input.size(); c++) {
-			std::vector<std::vector<real_t>> padded_input;
-			padded_input.resize(N + 2 * P);
-			for (uint32_t i = 0; i < padded_input.size(); i++) {
-				padded_input[i].resize(N + 2 * P);
-			}
-			for (uint32_t i = 0; i < padded_input.size(); i++) {
-				for (uint32_t j = 0; j < padded_input[i].size(); j++) {
-					if (i - P < 0 || j - P < 0 || i - P > input[c].size() - 1 || j - P > input[c][0].size() - 1) {
-						padded_input[i][j] = 0;
+		Ref<MLPPTensor3> padded_input;
+		padded_input.instance();
+
+		Ref<MLPPMatrix> padded_input_slice;
+		padded_input_slice.instance();
+
+		Size2i padded_input_slice_size = Size2i(N + 2 * P, N + 2 * P);
+		padded_input_slice->resize(padded_input_slice_size);
+
+		padded_input->resize(Size3i(padded_input_slice_size.x, padded_input_slice_size.y, input_size.z));
+
+		for (int c = 0; c < input_size.z; c++) {
+			for (int i = 0; i < padded_input_slice_size.y; i++) {
+				for (int j = 0; j < padded_input_slice_size.x; j++) {
+					if (i - P < 0 || j - P < 0 || i - P > input_size.y - 1 || j - P > input_size.x - 1) {
+						padded_input_slice->element_set(i, j, 0);
 					} else {
-						padded_input[i][j] = input[c][i - P][j - P];
+						padded_input_slice->element_set(i, j, input->element_get(i - P, j - P, c));
 					}
 				}
 			}
-			input[c].resize(padded_input.size());
-			for (uint32_t i = 0; i < padded_input.size(); i++) {
-				input[c][i].resize(padded_input[i].size());
-			}
-			input[c] = padded_input;
+
+			padded_input->z_slice_set_mlpp_matrix(c, padded_input_slice);
 		}
+
+		input = padded_input;
 	}
 
-	feature_map.resize(C);
-	for (uint32_t i = 0; i < feature_map.size(); i++) {
-		feature_map[i].resize(map_size);
-		for (uint32_t j = 0; j < feature_map[i].size(); j++) {
-			feature_map[i][j].resize(map_size);
-		}
-	}
+	Ref<MLPPTensor3> feature_map;
+	feature_map.instance();
+	feature_map->resize(Size3i(map_size, map_size, C));
+
+	Ref<MLPPVector> filter_flattened = filter->flatten();
+
+	Ref<MLPPVector> convolving_input;
+	convolving_input.instance();
+	convolving_input->resize(input_size.z * F * F);
+
+	for (int c = 0; c < C; c++) {
+		for (int i = 0; i < map_size; i++) {
+			for (int j = 0; j < map_size; j++) {
+				int current_index = 0;
+
+				for (int t = 0; t < input_size.z; t++) {
+					for (int k = 0; k < F; k++) {
+						for (int p = 0; p < F; p++) {
+							real_t val;
 
-	for (uint32_t c = 0; c < C; c++) {
-		for (uint32_t i = 0; i < map_size; i++) {
-			for (uint32_t j = 0; j < map_size; j++) {
-				std::vector<real_t> convolving_input;
-				for (uint32_t t = 0; t < input.size(); t++) {
-					for (uint32_t k = 0; k < F; k++) {
-						for (uint32_t p = 0; p < F; p++) {
 							if (i == 0 && j == 0) {
-								convolving_input.push_back(input[t][i + k][j + p]);
+								val = input->element_get(i + k, j + p, t);
 							} else if (i == 0) {
-								convolving_input.push_back(input[t][i + k][j + (S - 1) + p]);
+								val = input->element_get(i + k, j + (S - 1) + p, t);
 							} else if (j == 0) {
-								convolving_input.push_back(input[t][i + (S - 1) + k][j + p]);
+								val = input->element_get(i + (S - 1) + k, j + p, t);
 							} else {
-								convolving_input.push_back(input[t][i + (S - 1) + k][j + (S - 1) + p]);
+								val = input->element_get(i + (S - 1) + k, j + (S - 1) + p, t);
 							}
+
+							convolving_input->element_set(current_index, val);
+							++current_index;
 						}
 					}
 				}
-				feature_map[c][i][j] = alg.dot(convolving_input, alg.flatten(filter));
+
+				feature_map->element_set(i, j, c, convolving_input->dot(filter_flattened));
 			}
 		}
 	}
 	return feature_map;
 }
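+
+// Note on convolve_3d() (illustrative): each output cell gathers
+// input_size.z * F * F samples (e.g. 2 * 3 * 3 = 18 for a 2-channel input and a
+// 3x3 filter) and reduces them with one dot product against the flattened filter.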
 
-std::vector<std::vector<real_t>> MLPPConvolutions::pool_2d(std::vector<std::vector<real_t>> input, int F, int S, std::string type) {
+Ref<MLPPMatrix> MLPPConvolutions::pool_2d(const Ref<MLPPMatrix> &input, const int F, const int S, const PoolType type) {
 	MLPPLinAlg alg;
-	std::vector<std::vector<real_t>> pooled_map;
-	uint32_t N = input.size();
-	uint32_t map_size = floor((N - F) / S + 1);
-	pooled_map.resize(map_size);
-	for (uint32_t i = 0; i < map_size; i++) {
-		pooled_map[i].resize(map_size);
-	}
+	Size2i input_size = input->size();
+
+	int N = input_size.y;
+	int map_size = (N - F) / S + 1;
+
+	Ref<MLPPMatrix> pooled_map;
+	pooled_map.instance();
+	pooled_map->resize(Size2i(map_size, map_size));
+
+	Ref<MLPPVector> pooling_input;
+	pooling_input.instance();
+	pooling_input->resize(F * F);
+
+	for (int i = 0; i < map_size; i++) {
+		for (int j = 0; j < map_size; j++) {
+			int current_index = 0;
 
-	for (uint32_t i = 0; i < map_size; i++) {
-		for (uint32_t j = 0; j < map_size; j++) {
-			std::vector<real_t> pooling_input;
 			for (int k = 0; k < F; k++) {
 				for (int p = 0; p < F; p++) {
+					real_t val;
+
 					if (i == 0 && j == 0) {
-						pooling_input.push_back(input[i + k][j + p]);
+						val = input->element_get(i + k, j + p);
 					} else if (i == 0) {
-						pooling_input.push_back(input[i + k][j + (S - 1) + p]);
+						val = input->element_get(i + k, j + (S - 1) + p);
 					} else if (j == 0) {
-						pooling_input.push_back(input[i + (S - 1) + k][j + p]);
+						val = input->element_get(i + (S - 1) + k, j + p);
 					} else {
-						pooling_input.push_back(input[i + (S - 1) + k][j + (S - 1) + p]);
+						val = input->element_get(i + (S - 1) + k, j + (S - 1) + p);
 					}
+
+					pooling_input->element_set(current_index, val);
+					++current_index;
 				}
 			}
-			if (type == "Average") {
+
+			if (type == POOL_TYPE_AVERAGE) {
 				MLPPStat stat;
-				pooled_map[i][j] = stat.mean(pooling_input);
-			} else if (type == "Min") {
-				pooled_map[i][j] = alg.min(pooling_input);
+				pooled_map->element_set(i, j, stat.meanv(pooling_input));
+			} else if (type == POOL_TYPE_MIN) {
+				pooled_map->element_set(i, j, alg.minvr(pooling_input));
 			} else {
-				pooled_map[i][j] = alg.max(pooling_input);
+				pooled_map->element_set(i, j, alg.maxvr(pooling_input));
 			}
 		}
 	}
+
 	return pooled_map;
 }
 
-std::vector<std::vector<std::vector<real_t>>> MLPPConvolutions::pool_3d(std::vector<std::vector<std::vector<real_t>>> input, int F, int S, std::string type) {
-	std::vector<std::vector<std::vector<real_t>>> pooled_map;
-	for (uint32_t i = 0; i < input.size(); i++) {
-		pooled_map.push_back(pool_2d(input[i], F, S, type));
+Ref<MLPPTensor3> MLPPConvolutions::pool_3d(const Ref<MLPPTensor3> &input, const int F, const int S, const PoolType type) {
+	Size3i input_size = input->size();
+
+	Ref<MLPPMatrix> z_slice;
+	z_slice.instance();
+	z_slice->resize(Size2i(input_size.x, input_size.y));
+
+	Ref<MLPPTensor3> pooled_map;
+	pooled_map.instance();
+
+	int N = input_size.y;
+	int map_size = (N - F) / S + 1;
+
+	pooled_map->resize(Size3i(map_size, map_size, input_size.z));
+
+	for (int i = 0; i < input_size.z; i++) {
+		input->z_slice_get_into_mlpp_matrix(i, z_slice);
+
+		Ref<MLPPMatrix> p = pool_2d(z_slice, F, S, type);
+
+		pooled_map->z_slice_set_mlpp_matrix(i, p);
 	}
+
 	return pooled_map;
 }
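+
+// Pooling example (illustrative): pool_2d() on an 8x8 input with F = 4 and S = 4
+// yields a (8 - 4) / 4 + 1 = 2x2 map (see test_convolution_tensors_etc() below);
+// pool_3d() applies the same reduction to every z-slice independently.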
 
-real_t MLPPConvolutions::global_pool_2d(std::vector<std::vector<real_t>> input, std::string type) {
+real_t MLPPConvolutions::global_pool_2d(const Ref<MLPPMatrix> &input, const PoolType type) {
 	MLPPLinAlg alg;
-	if (type == "Average") {
+
+	Ref<MLPPVector> f = input->flatten();
+
+	if (type == POOL_TYPE_AVERAGE) {
 		MLPPStat stat;
-		return stat.mean(alg.flatten(input));
-	} else if (type == "Min") {
-		return alg.min(alg.flatten(input));
+		return stat.meanv(f);
+	} else if (type == POOL_TYPE_MIN) {
+		return alg.minvr(f);
 	} else {
-		return alg.max(alg.flatten(input));
+		return alg.maxvr(f);
 	}
 }
 
-std::vector<real_t> MLPPConvolutions::global_pool_3d(std::vector<std::vector<std::vector<real_t>>> input, std::string type) {
-	std::vector<real_t> pooled_map;
-	for (uint32_t i = 0; i < input.size(); i++) {
-		pooled_map.push_back(global_pool_2d(input[i], type));
+Ref<MLPPVector> MLPPConvolutions::global_pool_3d(const Ref<MLPPTensor3> &input, const PoolType type) {
+	Size3i input_size = input->size();
+
+	Ref<MLPPVector> pooled_map;
+	pooled_map.instance();
+	pooled_map->resize(input_size.z);
+
+	Ref<MLPPMatrix> z_slice;
+	z_slice.instance();
+	z_slice->resize(Size2i(input_size.x, input_size.y));
+
+	for (int i = 0; i < input_size.z; i++) {
+		input->z_slice_get_into_mlpp_matrix(i, z_slice);
+
+		pooled_map->element_set(i, global_pool_2d(z_slice, type));
 	}
+
 	return pooled_map;
 }
 
-real_t MLPPConvolutions::gaussian_2d(real_t x, real_t y, real_t std) {
+real_t MLPPConvolutions::gaussian_2d(const real_t x, const real_t y, const real_t std) {
 	real_t std_sq = std * std;
-	return 1 / (2 * Math_PI * std_sq) * std::exp(-(x * x + y * y) / 2 * std_sq);
+	return 1 / (2 * Math_PI * std_sq) * Math::exp(-(x * x + y * y) / (2 * std_sq));
 }
 
-std::vector<std::vector<real_t>> MLPPConvolutions::gaussian_filter_2d(int size, real_t std) {
-	std::vector<std::vector<real_t>> filter;
-	filter.resize(size);
-	for (uint32_t i = 0; i < filter.size(); i++) {
-		filter[i].resize(size);
-	}
+Ref<MLPPMatrix> MLPPConvolutions::gaussian_filter_2d(const int size, const real_t std) {
+	Ref<MLPPMatrix> filter;
+	filter.instance();
+	filter->resize(Size2i(size, size));
+
 	for (int i = 0; i < size; i++) {
 		for (int j = 0; j < size; j++) {
-			filter[i][j] = gaussian_2d(i - (size - 1) / 2, (size - 1) / 2 - j, std);
+			real_t val = gaussian_2d(i - (size - 1) / 2, (size - 1) / 2 - j, std);
+
+			filter->element_set(i, j, val);
 		}
 	}
+
 	return filter;
 }
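+
+// Example (illustrative): gaussian_filter_2d(3, 1) samples gaussian_2d() at
+// x, y in {-1, 0, 1} around the kernel center, giving a peak weight of
+// 1 / (2 * pi * std^2) at the center that decays with squared distance.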
 
 // Indeed a filter could have been used for this purpose, but I decided that it would've just
 // been easier to carry out the calculation explicitly, mainly because it is more informative,
 // and also because my convolution algorithm is only built for filters with equally sized
 // heights and widths.
-std::vector<std::vector<real_t>> MLPPConvolutions::dx(std::vector<std::vector<real_t>> input) {
-	std::vector<std::vector<real_t>> deriv; // We assume a gray scale image.
-	deriv.resize(input.size());
-	for (uint32_t i = 0; i < deriv.size(); i++) {
-		deriv[i].resize(input[i].size());
-	}
+Ref<MLPPMatrix> MLPPConvolutions::dx(const Ref<MLPPMatrix> &input) {
+	Size2i input_size = input->size();
 
-	for (uint32_t i = 0; i < input.size(); i++) {
-		for (uint32_t j = 0; j < input[i].size(); j++) {
-			if (j != 0 && j != input.size() - 1) {
-				deriv[i][j] = input[i][j + 1] - input[i][j - 1];
+	Ref<MLPPMatrix> deriv; // We assume a gray scale image.
+	deriv.instance();
+	deriv->resize(input_size);
+
+	for (int i = 0; i < input_size.y; i++) {
+		for (int j = 0; j < input_size.x; j++) {
+			if (j != 0 && j != input_size.x - 1) {
+				deriv->element_set(i, j, input->element_get(i, j + 1) - input->element_get(i, j - 1));
 			} else if (j == 0) {
-				deriv[i][j] = input[i][j + 1] - 0; // Implicit zero-padding
+				deriv->element_set(i, j, input->element_get(i, j + 1)); // E0 - 0 = Implicit zero-padding
 			} else {
-				deriv[i][j] = 0 - input[i][j - 1]; // Implicit zero-padding
+				deriv->element_set(i, j, -input->element_get(i, j - 1)); // 0 - E1 = Implicit zero-padding
 			}
 		}
 	}
+
 	return deriv;
 }
 
-std::vector<std::vector<real_t>> MLPPConvolutions::dy(std::vector<std::vector<real_t>> input) {
-	std::vector<std::vector<real_t>> deriv;
-	deriv.resize(input.size());
-	for (uint32_t i = 0; i < deriv.size(); i++) {
-		deriv[i].resize(input[i].size());
-	}
+Ref<MLPPMatrix> MLPPConvolutions::dy(const Ref<MLPPMatrix> &input) {
+	Size2i input_size = input->size();
 
-	for (uint32_t i = 0; i < input.size(); i++) {
-		for (uint32_t j = 0; j < input[i].size(); j++) {
-			if (i != 0 && i != input.size() - 1) {
-				deriv[i][j] = input[i - 1][j] - input[i + 1][j];
-			} else if (i == 0) {
-				deriv[i][j] = 0 - input[i + 1][j]; // Implicit zero-padding
+	Ref<MLPPMatrix> deriv; // We assume a gray scale image.
+	deriv.instance();
+	deriv->resize(input_size);
+
+	for (int i = 0; i < input_size.y; i++) {
+		for (int j = 0; j < input_size.x; j++) {
+			if (i != 0 && i != input_size.y - 1) {
+				deriv->element_set(i, j, input->element_get(i - 1, j) - input->element_get(i + 1, j));
+			} else if (i == 0) {
+				deriv->element_set(i, j, -input->element_get(i + 1, j)); // 0 - E1 = Implicit zero-padding
 			} else {
-				deriv[i][j] = input[i - 1][j] - 0; // Implicit zero-padding
+				deriv->element_set(i, j, input->element_get(i - 1, j)); // E0 - 0 = Implicit zero-padding
 			}
 		}
 	}
+
 	return deriv;
 }
 
-std::vector<std::vector<real_t>> MLPPConvolutions::grad_magnitude(std::vector<std::vector<real_t>> input) {
+Ref<MLPPMatrix> MLPPConvolutions::grad_magnitude(const Ref<MLPPMatrix> &input) {
 	MLPPLinAlg alg;
-	std::vector<std::vector<real_t>> x_deriv_2 = alg.hadamard_product(dx(input), dx(input));
-	std::vector<std::vector<real_t>> y_deriv_2 = alg.hadamard_product(dy(input), dy(input));
-	return alg.sqrt(alg.addition(x_deriv_2, y_deriv_2));
+
+	Ref<MLPPMatrix> x_deriv_2 = dx(input)->hadamard_productn(dx(input));
+	Ref<MLPPMatrix> y_deriv_2 = dy(input)->hadamard_productn(dy(input));
+
+	return x_deriv_2->addn(y_deriv_2)->sqrtn();
 }
 
-std::vector<std::vector<real_t>> MLPPConvolutions::grad_orientation(std::vector<std::vector<real_t>> input) {
-	std::vector<std::vector<real_t>> deriv;
-	deriv.resize(input.size());
-	for (uint32_t i = 0; i < deriv.size(); i++) {
-		deriv[i].resize(input[i].size());
-	}
+Ref<MLPPMatrix> MLPPConvolutions::grad_orientation(const Ref<MLPPMatrix> &input) {
+	Ref<MLPPMatrix> deriv; // We assume a gray scale image.
+	deriv.instance();
+	deriv->resize(input->size());
 
-	std::vector<std::vector<real_t>> x_deriv = dx(input);
-	std::vector<std::vector<real_t>> y_deriv = dy(input);
-	for (uint32_t i = 0; i < deriv.size(); i++) {
-		for (uint32_t j = 0; j < deriv[i].size(); j++) {
-			deriv[i][j] = std::atan2(y_deriv[i][j], x_deriv[i][j]);
+	Size2i deriv_size = deriv->size();
+
+	Ref<MLPPMatrix> x_deriv = dx(input);
+	Ref<MLPPMatrix> y_deriv = dy(input);
+
+	for (int i = 0; i < deriv_size.y; i++) {
+		for (int j = 0; j < deriv_size.x; j++) {
+			deriv->element_set(i, j, Math::atan2(y_deriv->element_get(i, j), x_deriv->element_get(i, j)));
 		}
 	}
+
 	return deriv;
 }
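+
+// dx()/dy() are central differences with implicit zero padding at the borders;
+// grad_magnitude() is then sqrt(dx^2 + dy^2) element-wise and grad_orientation()
+// is atan2(dy, dx), the usual image-gradient magnitude/angle decomposition.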
 
-std::vector<std::vector<std::vector<real_t>>> MLPPConvolutions::compute_m(std::vector<std::vector<real_t>> input) {
+Ref<MLPPTensor3> MLPPConvolutions::compute_m(const Ref<MLPPMatrix> &input) {
+	Size2i input_size = input->size();
+
 	real_t const SIGMA = 1;
 	real_t const GAUSSIAN_SIZE = 3;
 
-	real_t const GAUSSIAN_PADDING = ((input.size() - 1) + GAUSSIAN_SIZE - input.size()) / 2; // Convs must be same.
-	std::cout << GAUSSIAN_PADDING << std::endl;
-	MLPPLinAlg alg;
-	std::vector<std::vector<real_t>> x_deriv = dx(input);
-	std::vector<std::vector<real_t>> y_deriv = dy(input);
+	real_t const GAUSSIAN_PADDING = ((input_size.y - 1) + GAUSSIAN_SIZE - input_size.y) / 2; // Convs must be same.
 
-	std::vector<std::vector<real_t>> gaussian_filter = gaussian_filter_2d(GAUSSIAN_SIZE, SIGMA); // Sigma of 1, size of 3.
-	std::vector<std::vector<real_t>> xx_deriv = convolve_2d(alg.hadamard_product(x_deriv, x_deriv), gaussian_filter, 1, GAUSSIAN_PADDING);
-	std::vector<std::vector<real_t>> yy_deriv = convolve_2d(alg.hadamard_product(y_deriv, y_deriv), gaussian_filter, 1, GAUSSIAN_PADDING);
-	std::vector<std::vector<real_t>> xy_deriv = convolve_2d(alg.hadamard_product(x_deriv, y_deriv), gaussian_filter, 1, GAUSSIAN_PADDING);
+	Ref<MLPPMatrix> x_deriv = dx(input);
+	Ref<MLPPMatrix> y_deriv = dy(input);
+
+	Ref<MLPPMatrix> gaussian_filter = gaussian_filter_2d(GAUSSIAN_SIZE, SIGMA); // Sigma of 1, size of 3.
+	Ref<MLPPMatrix> xx_deriv = convolve_2d(x_deriv->hadamard_productn(x_deriv), gaussian_filter, 1, GAUSSIAN_PADDING);
+	Ref<MLPPMatrix> yy_deriv = convolve_2d(y_deriv->hadamard_productn(y_deriv), gaussian_filter, 1, GAUSSIAN_PADDING);
+	Ref<MLPPMatrix> xy_deriv = convolve_2d(x_deriv->hadamard_productn(y_deriv), gaussian_filter, 1, GAUSSIAN_PADDING);
+
+	Size2i ds = xx_deriv->size();
+
+	Ref<MLPPTensor3> M;
+	M.instance();
+	M->resize(Size3i(ds.x, ds.y, 3));
+
+	M->z_slice_set_mlpp_matrix(0, xx_deriv);
+	M->z_slice_set_mlpp_matrix(1, yy_deriv);
+	M->z_slice_set_mlpp_matrix(2, xy_deriv);
 
-	std::vector<std::vector<std::vector<real_t>>> M = { xx_deriv, yy_deriv, xy_deriv };
 	return M;
 }
-std::vector<std::vector<std::string>> MLPPConvolutions::harris_corner_detection(std::vector<std::vector<real_t>> input) {
+
+Vector<Ref<MLPPMatrix>> MLPPConvolutions::compute_mv(const Ref<MLPPMatrix> &input) {
+	Size2i input_size = input->size();
+
+	real_t const SIGMA = 1;
+	real_t const GAUSSIAN_SIZE = 3;
+
+	real_t const GAUSSIAN_PADDING = ((input_size.y - 1) + GAUSSIAN_SIZE - input_size.y) / 2; // Convs must be same.
+
+	Ref<MLPPMatrix> x_deriv = dx(input);
+	Ref<MLPPMatrix> y_deriv = dy(input);
+
+	Ref<MLPPMatrix> gaussian_filter = gaussian_filter_2d(GAUSSIAN_SIZE, SIGMA); // Sigma of 1, size of 3.
+	Ref<MLPPMatrix> xx_deriv = convolve_2d(x_deriv->hadamard_productn(x_deriv), gaussian_filter, 1, GAUSSIAN_PADDING);
+	Ref<MLPPMatrix> yy_deriv = convolve_2d(y_deriv->hadamard_productn(y_deriv), gaussian_filter, 1, GAUSSIAN_PADDING);
+	Ref<MLPPMatrix> xy_deriv = convolve_2d(x_deriv->hadamard_productn(y_deriv), gaussian_filter, 1, GAUSSIAN_PADDING);
+
+	Vector<Ref<MLPPMatrix>> M;
+	M.resize(3);
+
+	M.set(0, xx_deriv);
+	M.set(1, yy_deriv);
+	M.set(2, xy_deriv);
+
+	return M;
+}
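+
+// compute_m()/compute_mv() return the entries [Ixx, Iyy, Ixy] of the
+// Gaussian-smoothed structure tensor M = [[Ixx, Ixy], [Ixy, Iyy]], so below
+// det(M) = Ixx * Iyy - Ixy^2, trace(M) = Ixx + Iyy, and the Harris response
+// is R = det(M) - k * trace(M)^2.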
+
+Vector<Vector<char>> MLPPConvolutions::harris_corner_detection(const Ref<MLPPMatrix> &input) {
 	real_t const k = 0.05; // Empirically determined; k is typically chosen in [0.04, 0.06], with 0.05 the conventional choice.
-	MLPPLinAlg alg;
-	std::vector<std::vector<std::vector<real_t>>> M = compute_m(input);
-	std::vector<std::vector<real_t>> det = alg.subtraction(alg.hadamard_product(M[0], M[1]), alg.hadamard_product(M[2], M[2]));
-	std::vector<std::vector<real_t>> trace = alg.addition(M[0], M[1]);
+
+	Vector<Ref<MLPPMatrix>> M = compute_mv(input);
+
+	Ref<MLPPMatrix> M0 = M[0];
+	Ref<MLPPMatrix> M1 = M[1];
+	Ref<MLPPMatrix> M2 = M[2];
+
+	Ref<MLPPMatrix> det = M0->hadamard_productn(M1)->subn(M2->hadamard_productn(M2));
+	Ref<MLPPMatrix> trace = M0->addn(M1);
 
 	// The reason this is not a scalar is because xx_deriv, xy_deriv, yx_deriv, and yy_deriv are not scalars.
-	std::vector<std::vector<real_t>> r = alg.subtraction(det, alg.scalarMultiply(k, alg.hadamard_product(trace, trace)));
-	std::vector<std::vector<std::string>> imageTypes;
-	imageTypes.resize(r.size());
-	alg.printMatrix(r);
-	for (uint32_t i = 0; i < r.size(); i++) {
-		imageTypes[i].resize(r[i].size());
-		for (uint32_t j = 0; j < r[i].size(); j++) {
-			if (r[i][j] > 0) {
-				imageTypes[i][j] = "C";
-			} else if (r[i][j] < 0) {
-				imageTypes[i][j] = "E";
+	Ref<MLPPMatrix> r = det->subn(trace->hadamard_productn(trace)->scalar_multiplyn(k));
+	Size2i r_size = r->size();
+
+	Vector<Vector<char>> image_types;
+	image_types.resize(r_size.y);
+	//alg.printMatrix(r);
+
+	for (int i = 0; i < r_size.y; i++) {
+		image_types.write[i].resize(r_size.x);
+
+		for (int j = 0; j < r_size.x; j++) {
+			real_t e = r->element_get(i, j);
+
+			if (e > 0) {
+				image_types.write[i].write[j] = 'C';
+			} else if (e < 0) {
+				image_types.write[i].write[j] = 'E';
 			} else {
-				imageTypes[i][j] = "N";
+				image_types.write[i].write[j] = 'N';
 			}
 		}
 	}
-	return imageTypes;
+
+	return image_types;
 }
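+
+// Interpretation (assumed, following the usual Harris classification): 'C' marks a
+// corner (R > 0), 'E' an edge (R < 0) and 'N' a flat/neutral region (R == 0).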
 
-std::vector<std::vector<real_t>> MLPPConvolutions::get_prewitt_horizontal() {
+Ref<MLPPMatrix> MLPPConvolutions::get_prewitt_horizontal() const {
 	return _prewitt_horizontal;
 }
-std::vector<std::vector<real_t>> MLPPConvolutions::get_prewitt_vertical() {
+Ref<MLPPMatrix> MLPPConvolutions::get_prewitt_vertical() const {
 	return _prewitt_vertical;
 }
-std::vector<std::vector<real_t>> MLPPConvolutions::get_sobel_horizontal() {
+Ref<MLPPMatrix> MLPPConvolutions::get_sobel_horizontal() const {
 	return _sobel_horizontal;
 }
-std::vector<std::vector<real_t>> MLPPConvolutions::get_sobel_vertical() {
+Ref<MLPPMatrix> MLPPConvolutions::get_sobel_vertical() const {
 	return _sobel_vertical;
 }
-std::vector<std::vector<real_t>> MLPPConvolutions::get_scharr_horizontal() {
+Ref<MLPPMatrix> MLPPConvolutions::get_scharr_horizontal() const {
 	return _scharr_horizontal;
 }
-std::vector<std::vector<real_t>> MLPPConvolutions::get_scharr_vertical() {
+Ref<MLPPMatrix> MLPPConvolutions::get_scharr_vertical() const {
 	return _scharr_vertical;
 }
-std::vector<std::vector<real_t>> MLPPConvolutions::get_roberts_horizontal() {
+Ref<MLPPMatrix> MLPPConvolutions::get_roberts_horizontal() const {
 	return _roberts_horizontal;
 }
-std::vector<std::vector<real_t>> MLPPConvolutions::get_roberts_vertical() {
+Ref<MLPPMatrix> MLPPConvolutions::get_roberts_vertical() const {
 	return _roberts_vertical;
 }
-*/
 
 MLPPConvolutions::MLPPConvolutions() {
-	/*
-	_prewitt_horizontal = { { 1, 1, 1 }, { 0, 0, 0 }, { -1, -1, -1 } };
-	_prewitt_vertical = { { 1, 0, -1 }, { 1, 0, -1 }, { 1, 0, -1 } };
-	_sobel_horizontal = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } };
-	_sobel_vertical = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } };
-	_scharr_horizontal = { { 3, 10, 3 }, { 0, 0, 0 }, { -3, -10, -3 } };
-	_scharr_vertical = { { 3, 0, -3 }, { 10, 0, -10 }, { 3, 0, -3 } };
-	_roberts_horizontal = { { 0, 1 }, { -1, 0 } };
-	_roberts_vertical = { { 1, 0 }, { 0, -1 } };
-	*/
+	const real_t prewitt_horizontal_arr[] = {
+		1, 1, 1, //
+		0, 0, 0, //
+		-1, -1, -1 //
+	};
+	const real_t prewitt_vertical_arr[] = {
+		1, 0, -1, //
+		1, 0, -1, //
+		1, 0, -1 //
+	};
+	const real_t sobel_horizontal_arr[] = {
+		1, 2, 1, //
+		0, 0, 0, //
+		-1, -2, -1 //
+	};
+	const real_t sobel_vertical_arr[] = {
+		-1, 0, 1, //
+		-2, 0, 2, //
+		-1, 0, 1 //
+	};
+	const real_t scharr_horizontal_arr[] = {
+		3, 10, 3, //
+		0, 0, 0, //
+		-3, -10, -3 //
+	};
+	const real_t scharr_vertical_arr[] = {
+		3, 0, -3, //
+		10, 0, -10, //
+		3, 0, -3 //
+	};
+	const real_t roberts_horizontal_arr[] = {
+		0, 1, //
+		-1, 0 //
+	};
+	const real_t roberts_vertical_arr[] = {
+		1, 0, //
+		0, -1 //
+	};
+
+	_prewitt_horizontal = Ref<MLPPMatrix>(memnew(MLPPMatrix(prewitt_horizontal_arr, 3, 3)));
+	_prewitt_vertical = Ref<MLPPMatrix>(memnew(MLPPMatrix(prewitt_vertical_arr, 3, 3)));
+	_sobel_horizontal = Ref<MLPPMatrix>(memnew(MLPPMatrix(sobel_horizontal_arr, 3, 3)));
+	_sobel_vertical = Ref<MLPPMatrix>(memnew(MLPPMatrix(sobel_vertical_arr, 3, 3)));
+	_scharr_horizontal = Ref<MLPPMatrix>(memnew(MLPPMatrix(scharr_horizontal_arr, 3, 3)));
+	_scharr_vertical = Ref<MLPPMatrix>(memnew(MLPPMatrix(scharr_vertical_arr, 3, 3)));
+	_roberts_horizontal = Ref<MLPPMatrix>(memnew(MLPPMatrix(roberts_horizontal_arr, 2, 2)));
+	_roberts_vertical = Ref<MLPPMatrix>(memnew(MLPPMatrix(roberts_vertical_arr, 2, 2)));
 }
 
 void MLPPConvolutions::_bind_methods() {
diff --git a/mlpp/convolutions/convolutions.h b/mlpp/convolutions/convolutions.h
index 9edb460..a230b65 100644
--- a/mlpp/convolutions/convolutions.h
+++ b/mlpp/convolutions/convolutions.h
@@ -2,64 +2,73 @@
 #ifndef MLPP_CONVOLUTIONS_H
 #define MLPP_CONVOLUTIONS_H
 
-#include <vector>
-#include <string>
+#include "core/containers/vector.h"
+#include "core/string/ustring.h"
 
 #include "core/math/math_defs.h"
 
+#include "../lin_alg/mlpp_matrix.h"
+#include "../lin_alg/mlpp_tensor3.h"
+#include "../lin_alg/mlpp_vector.h"
+
 #include "core/object/reference.h"
 
 class MLPPConvolutions : public Reference {
	GDCLASS(MLPPConvolutions, Reference);
 
 public:
-	/*
-	std::vector<std::vector<real_t>> convolve_2d(std::vector<std::vector<real_t>> input, std::vector<std::vector<real_t>> filter, int S, int P = 0);
-	std::vector<std::vector<std::vector<real_t>>> convolve_3d(std::vector<std::vector<std::vector<real_t>>> input, std::vector<std::vector<std::vector<real_t>>> filter, int S, int P = 0);
+	enum PoolType {
+		POOL_TYPE_AVERAGE = 0,
+		POOL_TYPE_MIN,
+		POOL_TYPE_MAX,
+	};
 
-	std::vector<std::vector<real_t>> pool_2d(std::vector<std::vector<real_t>> input, int F, int S, std::string type);
-	std::vector<std::vector<std::vector<real_t>>> pool_3d(std::vector<std::vector<std::vector<real_t>>> input, int F, int S, std::string type);
+	Ref<MLPPMatrix> convolve_2d(const Ref<MLPPMatrix> &input, const Ref<MLPPMatrix> &filter, const int S, const int P = 0);
+	Ref<MLPPTensor3> convolve_3d(const Ref<MLPPTensor3> &input, const Ref<MLPPTensor3> &filter, const int S, const int P = 0);
 
-	real_t global_pool_2d(std::vector<std::vector<real_t>> input, std::string type);
-	std::vector<real_t> global_pool_3d(std::vector<std::vector<std::vector<real_t>>> input, std::string type);
+	Ref<MLPPMatrix> pool_2d(const Ref<MLPPMatrix> &input, const int F, const int S, const PoolType type);
+	Ref<MLPPTensor3> pool_3d(const Ref<MLPPTensor3> &input, const int F, const int S, const PoolType type);
 
-	real_t gaussian_2d(real_t x, real_t y, real_t std);
-	std::vector<std::vector<real_t>> gaussian_filter_2d(int size, real_t std);
+	real_t global_pool_2d(const Ref<MLPPMatrix> &input, const PoolType type);
+	Ref<MLPPVector> global_pool_3d(const Ref<MLPPTensor3> &input, const PoolType type);
 
-	std::vector<std::vector<real_t>> dx(std::vector<std::vector<real_t>> input);
-	std::vector<std::vector<real_t>> dy(std::vector<std::vector<real_t>> input);
+	real_t gaussian_2d(const real_t x, const real_t y, const real_t std);
+	Ref<MLPPMatrix> gaussian_filter_2d(const int size, const real_t std);
 
-	std::vector<std::vector<real_t>> grad_magnitude(std::vector<std::vector<real_t>> input);
-	std::vector<std::vector<real_t>> grad_orientation(std::vector<std::vector<real_t>> input);
+	Ref<MLPPMatrix> dx(const Ref<MLPPMatrix> &input);
+	Ref<MLPPMatrix> dy(const Ref<MLPPMatrix> &input);
 
-	std::vector<std::vector<std::vector<real_t>>> compute_m(std::vector<std::vector<real_t>> input);
-	std::vector<std::vector<std::string>> harris_corner_detection(std::vector<std::vector<real_t>> input);
+	Ref<MLPPMatrix> grad_magnitude(const Ref<MLPPMatrix> &input);
+	Ref<MLPPMatrix> grad_orientation(const Ref<MLPPMatrix> &input);
 
-	std::vector<std::vector<real_t>> get_prewitt_horizontal();
-	std::vector<std::vector<real_t>> get_prewitt_vertical();
-	std::vector<std::vector<real_t>> get_sobel_horizontal();
-	std::vector<std::vector<real_t>> get_sobel_vertical();
-	std::vector<std::vector<real_t>> get_scharr_horizontal();
-	std::vector<std::vector<real_t>> get_scharr_vertical();
-	std::vector<std::vector<real_t>> get_roberts_horizontal();
-	std::vector<std::vector<real_t>> get_roberts_vertical();
-	*/
+	Ref<MLPPTensor3> compute_m(const Ref<MLPPMatrix> &input);
+	Vector<Ref<MLPPMatrix>> compute_mv(const Ref<MLPPMatrix> &input);
+
+	//TODO better data structure for this. Maybe IntMatrix?
+	Vector<Vector<char>> harris_corner_detection(const Ref<MLPPMatrix> &input);
+
+	Ref<MLPPMatrix> get_prewitt_horizontal() const;
+	Ref<MLPPMatrix> get_prewitt_vertical() const;
+	Ref<MLPPMatrix> get_sobel_horizontal() const;
+	Ref<MLPPMatrix> get_sobel_vertical() const;
+	Ref<MLPPMatrix> get_scharr_horizontal() const;
+	Ref<MLPPMatrix> get_scharr_vertical() const;
+	Ref<MLPPMatrix> get_roberts_horizontal() const;
+	Ref<MLPPMatrix> get_roberts_vertical() const;
 
 	MLPPConvolutions();
 
 protected:
 	static void _bind_methods();
 
-	/*
-	std::vector<std::vector<real_t>> _prewitt_horizontal;
-	std::vector<std::vector<real_t>> _prewitt_vertical;
-	std::vector<std::vector<real_t>> _sobel_horizontal;
-	std::vector<std::vector<real_t>> _sobel_vertical;
-	std::vector<std::vector<real_t>> _scharr_horizontal;
-	std::vector<std::vector<real_t>> _scharr_vertical;
-	std::vector<std::vector<real_t>> _roberts_horizontal;
-	std::vector<std::vector<real_t>> _roberts_vertical;
-	*/
+	Ref<MLPPMatrix> _prewitt_horizontal;
+	Ref<MLPPMatrix> _prewitt_vertical;
+	Ref<MLPPMatrix> _sobel_horizontal;
+	Ref<MLPPMatrix> _sobel_vertical;
+	Ref<MLPPMatrix> _scharr_horizontal;
+	Ref<MLPPMatrix> _scharr_vertical;
+	Ref<MLPPMatrix> _roberts_horizontal;
+	Ref<MLPPMatrix> _roberts_vertical;
 };
 
 #endif // Convolutions_hpp
\ No newline at end of file
diff --git a/mlpp/transforms/transforms.cpp b/mlpp/transforms/transforms.cpp
index bf56a79..86458f1 100644
--- a/mlpp/transforms/transforms.cpp
+++ b/mlpp/transforms/transforms.cpp
@@ -6,53 +6,53 @@
 #include "transforms.h"
 #include "../lin_alg/lin_alg.h"
-#include <iostream>
-#include <string>
-#include <cmath>
-/*
+#include "core/math/math_funcs.h"
+
 // DCT ii.
 // https://www.mathworks.com/help/images/discrete-cosine-transform.html
-std::vector<std::vector<real_t>> MLPPTransforms::discreteCosineTransform(std::vector<std::vector<real_t>> A) {
-	MLPPLinAlg alg;
-	A = alg.scalarAdd(-128, A); // Center around 0.
+Ref<MLPPMatrix> MLPPTransforms::discrete_cosine_transform(const Ref<MLPPMatrix> &p_A) {
+	Ref<MLPPMatrix> A = p_A->scalar_addn(-128); // Center around 0.
 
-	std::vector<std::vector<real_t>> B;
-	B.resize(A.size());
-	for (uint32_t i = 0; i < B.size(); i++) {
-		B[i].resize(A[i].size());
-	}
+	Size2i size = A->size();
 
-	int M = A.size();
+	Ref<MLPPMatrix> B;
+	B.instance();
+	B->resize(size);
 
-	for (uint32_t i = 0; i < B.size(); i++) {
-		for (uint32_t j = 0; j < B[i].size(); j++) {
+	real_t M = size.y;
+	real_t inv_sqrt_M = 1 / Math::sqrt(M);
+	real_t s2M = Math::sqrt(real_t(2) / real_t(M));
+
+	for (int i = 0; i < size.y; i++) {
+		for (int j = 0; j < size.x; j++) {
 			real_t sum = 0;
+
 			real_t alphaI;
 			if (i == 0) {
-				alphaI = 1 / std::sqrt(M);
+				alphaI = inv_sqrt_M;
 			} else {
-				alphaI = std::sqrt(real_t(2) / real_t(M));
-			}
-			real_t alphaJ;
-			if (j == 0) {
-				alphaJ = 1 / std::sqrt(M);
-			} else {
-				alphaJ = std::sqrt(real_t(2) / real_t(M));
+				alphaI = s2M;
 			}
 
-			for (uint32_t k = 0; k < B.size(); k++) {
-				for (uint32_t f = 0; f < B[k].size(); f++) {
-					sum += A[k][f] * std::cos((Math_PI * i * (2 * k + 1)) / (2 * M)) * std::cos((Math_PI * j * (2 * f + 1)) / (2 * M));
+			real_t alphaJ;
+			if (j == 0) {
+				alphaJ = inv_sqrt_M;
+			} else {
+				alphaJ = s2M;
+			}
+
+			for (int k = 0; k < size.y; k++) {
+				for (int f = 0; f < size.x; f++) {
+					sum += A->element_get(k, f) * Math::cos((Math_PI * i * (2 * k + 1)) / (2 * M)) * Math::cos((Math_PI * j * (2 * f + 1)) / (2 * M));
 				}
 			}
-			B[i][j] = sum;
-			B[i][j] *= alphaI * alphaJ;
+
+			B->element_set(i, j, sum * alphaI * alphaJ);
 		}
 	}
 	return B;
 }
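+
+// For reference, the loop above evaluates the DCT-II:
+//   B(i, j) = alpha(i) * alpha(j) * sum_k sum_f A(k, f)
+//             * cos(pi * i * (2k + 1) / (2M)) * cos(pi * j * (2f + 1) / (2M)),
+// with alpha(0) = 1 / sqrt(M) and alpha(n) = sqrt(2 / M) otherwise.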
-*/
 
 void MLPPTransforms::_bind_methods() {
 }
diff --git a/mlpp/transforms/transforms.h b/mlpp/transforms/transforms.h
index 39004a1..725e135 100644
--- a/mlpp/transforms/transforms.h
+++ b/mlpp/transforms/transforms.h
@@ -11,14 +11,13 @@
 
 #include "core/object/reference.h"
 
-#include <vector>
-#include <string>
+#include "../lin_alg/mlpp_matrix.h"
 
 class MLPPTransforms : public Reference {
	GDCLASS(MLPPTransforms, Reference);
 
 public:
-	//std::vector<std::vector<real_t>> discreteCosineTransform(std::vector<std::vector<real_t>> A);
+	Ref<MLPPMatrix> discrete_cosine_transform(const Ref<MLPPMatrix> &p_A);
 
 protected:
 	static void _bind_methods();
diff --git a/test/mlpp_tests.cpp b/test/mlpp_tests.cpp
index 4f582f7..f226eec 100644
--- a/test/mlpp_tests.cpp
+++ b/test/mlpp_tests.cpp
@@ -747,14 +747,20 @@ void MLPPTests::test_naive_bayes() {
 	output_set.instance();
 	output_set->set_from_std_vector(outputSet);
 
+	ERR_PRINT("MLPPMultinomialNB");
+
 	MLPPMultinomialNB MNB(input_set, output_set, 2);
 	PLOG_MSG(MNB.model_set_test(input_set)->to_string());
 
-	MLPPBernoulliNB BNB(algn.transposenm(input_set), output_set);
-	PLOG_MSG(BNB.model_set_test(algn.transposenm(input_set))->to_string());
+	ERR_PRINT("MLPPBernoulliNB");
 
-	MLPPGaussianNB GNB(algn.transposenm(input_set), output_set, 2);
-	PLOG_MSG(GNB.model_set_test(algn.transposenm(input_set))->to_string());
+	MLPPBernoulliNB BNB(input_set, output_set);
+	PLOG_MSG(BNB.model_set_test(input_set)->to_string());
+
+	ERR_PRINT("MLPPGaussianNB");
+
+	MLPPGaussianNB GNB(input_set, output_set, 2);
+	PLOG_MSG(GNB.model_set_test(input_set)->to_string());
 }
 
 void MLPPTests::test_k_means(bool ui) {
 	// KMeans
@@ -816,50 +822,64 @@ void MLPPTests::test_knn(bool ui) {
 }
 
 void MLPPTests::test_convolution_tensors_etc() {
-	/*
 	MLPPLinAlg alg;
 	MLPPLinAlg algn;
 	MLPPData data;
 
-	MLPPConvolutionsOld conv;
+	MLPPConvolutions conv;
+	MLPPTransforms trans;
 
 	// CONVOLUTION, POOLING, ETC..
-	std::vector<std::vector<real_t>> input = {
-		{ 1 },
+	const real_t input_arr[] = {
+		1,
 	};
 
-	std::vector<std::vector<std::vector<real_t>>> tensorSet;
-	tensorSet.push_back(input);
-	tensorSet.push_back(input);
-	tensorSet.push_back(input);
+	Ref<MLPPMatrix> input = Ref<MLPPMatrix>(memnew(MLPPMatrix(input_arr, 1, 1)));
 
-	alg.printTensor(data.rgb2xyz(tensorSet));
+	Ref<MLPPTensor3> tensor_set;
+	tensor_set.instance();
+	tensor_set->resize(Size3i(1, 1, 0));
+	tensor_set->z_slice_add_mlpp_matrix(input);
+	tensor_set->z_slice_add_mlpp_matrix(input);
+	tensor_set->z_slice_add_mlpp_matrix(input);
 
-	std::vector<std::vector<real_t>> input2 = {
-		{ 62, 55, 55, 54, 49, 48, 47, 55 },
-		{ 62, 57, 54, 52, 48, 47, 48, 53 },
-		{ 61, 60, 52, 49, 48, 47, 49, 54 },
-		{ 63, 61, 60, 60, 63, 65, 68, 65 },
-		{ 67, 67, 70, 74, 79, 85, 91, 92 },
-		{ 82, 95, 101, 106, 114, 115, 112, 117 },
-		{ 96, 111, 115, 119, 128, 128, 130, 127 },
-		{ 109, 121, 127, 133, 139, 141, 140, 133 },
+	ERR_PRINT("TODO data.rgb2xyz(tensor_set)");
+	//ERR_PRINT(data.rgb2xyz(tensor_set)->to_string());
+
+	const real_t input2_arr[] = {
+		62, 55, 55, 54, 49, 48, 47, 55, //
+		62, 57, 54, 52, 48, 47, 48, 53, //
+		61, 60, 52, 49, 48, 47, 49, 54, //
+		63, 61, 60, 60, 63, 65, 68, 65, //
+		67, 67, 70, 74, 79, 85, 91, 92, //
+		82, 95, 101, 106, 114, 115, 112, 117, //
+		96, 111, 115, 119, 128, 128, 130, 127, //
+		109, 121, 127, 133, 139, 141, 140, 133, //
 	};
 
-	MLPPTransformsOld trans;
+	Ref<MLPPMatrix> input2 = Ref<MLPPMatrix>(memnew(MLPPMatrix(input2_arr, 8, 8)));
 
-	alg.printMatrix(trans.discreteCosineTransform(input2));
+	ERR_PRINT(trans.discrete_cosine_transform(input2)->to_string());
 
-	alg.printMatrix(conv.convolve_2d(input2, conv.get_prewitt_vertical(), 1)); // Can use padding
-	alg.printMatrix(conv.pool_2d(input2, 4, 4, "Max")); // Can use Max, Min, or Average pooling.
+	ERR_PRINT(conv.convolve_2d(input2, conv.get_prewitt_vertical(), 1)->to_string()); // Can use padding
+	ERR_PRINT(conv.pool_2d(input2, 4, 4, MLPPConvolutions::POOL_TYPE_MAX)->to_string()); // Can use Max, Min, or Average pooling.
 
-	std::vector<std::vector<std::vector<real_t>>> tensorSet2;
-	tensorSet2.push_back(input2);
-	tensorSet2.push_back(input2);
-	alg.printVector(conv.global_pool_3d(tensorSet2, "Average")); // Can use Max, Min, or Average global pooling.
+	Ref<MLPPTensor3> tensor_set2;
+	tensor_set2.instance();
+	tensor_set2->resize(Size3i(8, 8, 0));
+	tensor_set2->z_slice_add_mlpp_matrix(input2);
+	tensor_set2->z_slice_add_mlpp_matrix(input2);
 
-	std::vector<std::vector<real_t>> laplacian = { { 1, 1, 1 }, { 1, -4, 1 }, { 1, 1, 1 } };
-	alg.printMatrix(conv.convolve_2d(conv.gaussian_filter_2d(5, 1), laplacian, 1));
-	*/
+	ERR_PRINT(conv.global_pool_3d(tensor_set2, MLPPConvolutions::POOL_TYPE_AVERAGE)->to_string()); // Can use Max, Min, or Average global pooling.
+
+	const real_t laplacian_arr[] = {
+		1, 1, 1, //
+		1, -4, 1, //
+		1, 1, 1 //
+	};
+
+	Ref<MLPPMatrix> laplacian = Ref<MLPPMatrix>(memnew(MLPPMatrix(laplacian_arr, 3, 3)));
+
+	ERR_PRINT(conv.convolve_2d(conv.gaussian_filter_2d(5, 1), laplacian, 1)->to_string());
 }
 
 void MLPPTests::test_pca_svd_eigenvalues_eigenvectors(bool ui) {
 	/*