/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vpx_scale_rtcd.h"
#include "./vpx_config.h"

#include "vpx/vpx_integer.h"

#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"

#if CONFIG_VP9_HIGHBITDEPTH
void vp9_highbd_build_inter_predictor(const uint8_t *src, int src_stride,
                                      uint8_t *dst, int dst_stride,
                                      const MV *src_mv,
                                      const struct scale_factors *sf,
                                      int w, int h, int ref,
                                      const InterpKernel *kernel,
                                      enum mv_precision precision,
                                      int x, int y, int bd) {
  const int is_q4 = precision == MV_PRECISION_Q4;
  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                     is_q4 ? src_mv->col : src_mv->col * 2 };
  MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
  const int subpel_x = mv.col & SUBPEL_MASK;
  const int subpel_y = mv.row & SUBPEL_MASK;

  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);

  highbd_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
                         sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4,
                         bd);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                               uint8_t *dst, int dst_stride,
                               const MV *src_mv,
                               const struct scale_factors *sf,
                               int w, int h, int ref,
                               const InterpKernel *kernel,
                               enum mv_precision precision,
                               int x, int y) {
  const int is_q4 = precision == MV_PRECISION_Q4;
  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                     is_q4 ? src_mv->col : src_mv->col * 2 };
  MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
  const int subpel_x = mv.col & SUBPEL_MASK;
  const int subpel_y = mv.row & SUBPEL_MASK;

  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);

  inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
                  sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
}

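/* Note on the decomposition above: SUBPEL_BITS is 4 and SUBPEL_MASK is 15
 * (vp9_filter.h), so the builders split the scaled q4 MV into a full-pel
 * offset and a 1/16-pel interpolation phase. Worked example: mv.col == -19
 * gives subpel_x == (-19 & 15) == 13 and a full-pel offset of
 * (-19 >> 4) == -2, i.e. -2 + 13/16 == -19/16 pel. This relies on the
 * two's-complement arithmetic right shift that libvpx assumes throughout. */
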
static INLINE int round_mv_comp_q4(int value) {
  return (value < 0 ? value - 2 : value + 2) / 4;
}

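/* round_mv_comp_q4() divides a sum of four MV components by 4, rounding to
 * nearest with ties away from zero instead of truncating toward zero.
 * Worked example: 6 -> (6 + 2) / 4 == 2 and -6 -> (-6 - 2) / 4 == -2,
 * where plain C division would give 1 and -1. round_mv_comp_q2() below
 * does the same for a sum of two components. */
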
static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {
  MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +
                              mi->bmi[1].as_mv[idx].as_mv.row +
                              mi->bmi[2].as_mv[idx].as_mv.row +
                              mi->bmi[3].as_mv[idx].as_mv.row),
             round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col +
                              mi->bmi[1].as_mv[idx].as_mv.col +
                              mi->bmi[2].as_mv[idx].as_mv.col +
                              mi->bmi[3].as_mv[idx].as_mv.col) };
  return res;
}

static INLINE int round_mv_comp_q2(int value) {
  return (value < 0 ? value - 1 : value + 1) / 2;
}

static MV mi_mv_pred_q2(const MODE_INFO *mi, int idx, int block0, int block1) {
  MV res = { round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.row +
                              mi->bmi[block1].as_mv[idx].as_mv.row),
             round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.col +
                              mi->bmi[block1].as_mv[idx].as_mv.col) };
  return res;
}

// TODO(jkoleszar): yet another mv clamping function :-(
MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
                             int bw, int bh, int ss_x, int ss_y) {
  // If the MV points so far into the UMV border that no visible pixels
  // are used for reconstruction, the subpel part of the MV can be
  // discarded and the MV limited to 16 pixels with equivalent results.
  const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS;
  const int spel_right = spel_left - SUBPEL_SHIFTS;
  const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS;
  const int spel_bottom = spel_top - SUBPEL_SHIFTS;
  MV clamped_mv = {
    src_mv->row * (1 << (1 - ss_y)),
    src_mv->col * (1 << (1 - ss_x))
  };
  assert(ss_x <= 1);
  assert(ss_y <= 1);

  clamp_mv(&clamped_mv,
           xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left,
           xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right,
           xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top,
           xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom);

  return clamped_mv;
}

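/* The (1 << (1 - ss)) factors convert the q3 (1/8-pel) luma MV into a q4
 * (1/16-pel) MV in the plane's own pel units: with no subsampling the
 * component is doubled (q3 to q4); with subsampling 1 it is kept as-is,
 * since one chroma pel spans two luma pels. Worked example, assuming
 * ss_x == 1: col == 6 (0.75 luma pel in q3) stays 6, which is
 * 6/16 == 0.375 chroma pel in q4, the same physical displacement. */
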
MV average_split_mvs(const struct macroblockd_plane *pd,
                     const MODE_INFO *mi, int ref, int block) {
  const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0);
  MV res = {0, 0};
  switch (ss_idx) {
    case 0:
      res = mi->bmi[block].as_mv[ref].as_mv;
      break;
    case 1:
      res = mi_mv_pred_q2(mi, ref, block, block + 2);
      break;
    case 2:
      res = mi_mv_pred_q2(mi, ref, block, block + 1);
      break;
    case 3:
      res = mi_mv_pred_q4(mi, ref);
      break;
    default:
      assert(ss_idx <= 3 && ss_idx >= 0);
  }
  return res;
}

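/* The ss_idx cases above follow the raster order of the four 4x4 sub-blocks
 * (0 1 on the top row, 2 3 on the bottom):
 *   case 0: no subsampling, the sub-block keeps its own MV;
 *   case 1: vertical subsampling only, average the vertical pair
 *           (block, block + 2);
 *   case 2: horizontal subsampling only, average the horizontal pair
 *           (block, block + 1);
 *   case 3: 4:2:0, one chroma block covers all four sub-blocks, so all
 *           four MVs are averaged via mi_mv_pred_q4(). */
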
static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
                                   int bw, int bh,
                                   int x, int y, int w, int h,
                                   int mi_x, int mi_y) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const MODE_INFO *mi = xd->mi[0];
  const int is_compound = has_second_ref(mi);
  const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
  int ref;

  for (ref = 0; ref < 1 + is_compound; ++ref) {
    const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
    struct buf_2d *const pre_buf = &pd->pre[ref];
    struct buf_2d *const dst_buf = &pd->dst;
    uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
    const MV mv = mi->sb_type < BLOCK_8X8
                      ? average_split_mvs(pd, mi, ref, block)
                      : mi->mv[ref].as_mv;

    // TODO(jkoleszar): This clamping is done in the incorrect place for the
    // scaling case. It needs to be done on the scaled MV, not the pre-scaling
    // MV. Note however that it performs the subsampling aware scaling so
    // that the result is always q4.
    // mv_precision precision is MV_PRECISION_Q4.
    const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
                                               pd->subsampling_x,
                                               pd->subsampling_y);

    uint8_t *pre;
    MV32 scaled_mv;
    int xs, ys, subpel_x, subpel_y;
    const int is_scaled = vp9_is_scaled(sf);

    if (is_scaled) {
      // Co-ordinate of containing block to pixel precision.
      const int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
      const int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
#if CONFIG_BETTER_HW_COMPATIBILITY
      assert(xd->mi[0]->sb_type != BLOCK_4X8 &&
             xd->mi[0]->sb_type != BLOCK_8X4);
      assert(mv_q4.row == mv.row * (1 << (1 - pd->subsampling_y)) &&
             mv_q4.col == mv.col * (1 << (1 - pd->subsampling_x)));
#endif
      if (plane == 0)
        pre_buf->buf = xd->block_refs[ref]->buf->y_buffer;
      else if (plane == 1)
        pre_buf->buf = xd->block_refs[ref]->buf->u_buffer;
      else
        pre_buf->buf = xd->block_refs[ref]->buf->v_buffer;

      pre_buf->buf += scaled_buffer_offset(x_start + x, y_start + y,
                                           pre_buf->stride, sf);
      pre = pre_buf->buf;
      scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
      xs = sf->x_step_q4;
      ys = sf->y_step_q4;
    } else {
      pre = pre_buf->buf + (y * pre_buf->stride + x);
      scaled_mv.row = mv_q4.row;
      scaled_mv.col = mv_q4.col;
      xs = ys = 16;
    }
    subpel_x = scaled_mv.col & SUBPEL_MASK;
    subpel_y = scaled_mv.row & SUBPEL_MASK;
    pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride
           + (scaled_mv.col >> SUBPEL_BITS);

#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      highbd_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
                             subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys,
                             xd->bd);
    } else {
      inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
                      subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
    }
#else
    inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
                    subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }
}

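/* In the unscaled path above, xs == ys == 16: the interpolator advances one
 * full source pel (16 q4 units) per output pel. With a scaled reference,
 * x_step_q4/y_step_q4 carry the resolution ratio instead; for example, a
 * reference frame twice as wide as the current frame would give an
 * x_step_q4 of 32, i.e. two source pels per output pel. */
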
static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                              int mi_row, int mi_col,
                                              int plane_from, int plane_to) {
  int plane;
  const int mi_x = mi_col * MI_SIZE;
  const int mi_y = mi_row * MI_SIZE;
  for (plane = plane_from; plane <= plane_to; ++plane) {
    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
                                                        &xd->plane[plane]);
    const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
    const int bw = 4 * num_4x4_w;
    const int bh = 4 * num_4x4_h;

    if (xd->mi[0]->sb_type < BLOCK_8X8) {
      int i = 0, x, y;
      assert(bsize == BLOCK_8X8);
      for (y = 0; y < num_4x4_h; ++y)
        for (x = 0; x < num_4x4_w; ++x)
          build_inter_predictors(xd, plane, i++, bw, bh,
                                 4 * x, 4 * y, 4, 4, mi_x, mi_y);
    } else {
      build_inter_predictors(xd, plane, 0, bw, bh,
                             0, 0, bw, bh, mi_x, mi_y);
    }
  }
}

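/* The wrappers below differ only in the plane range they forward:
 * _sby predicts luma only (plane 0), _sbp a single caller-chosen plane,
 * _sbuv the chroma planes (1 .. MAX_MB_PLANE - 1), and _sb all planes. */
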
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
                                    BLOCK_SIZE bsize) {
  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
}

void vp9_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
                                    BLOCK_SIZE bsize, int plane) {
  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, plane, plane);
}

void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     BLOCK_SIZE bsize) {
  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
                                    MAX_MB_PLANE - 1);
}

void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
                                   BLOCK_SIZE bsize) {
  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
                                    MAX_MB_PLANE - 1);
}

void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
                          const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
                                           src->v_buffer };
  const int strides[MAX_MB_PLANE] = { src->y_stride, src->uv_stride,
                                      src->uv_stride };
  int i;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    struct macroblockd_plane *const pd = &planes[i];
    setup_pred_plane(&pd->dst, buffers[i], strides[i], mi_row, mi_col, NULL,
                     pd->subsampling_x, pd->subsampling_y);
  }
}

void vp9_setup_pre_planes(MACROBLOCKD *xd, int idx,
                          const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col,
                          const struct scale_factors *sf) {
  if (src != NULL) {
    int i;
    uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
                                             src->v_buffer };
    const int strides[MAX_MB_PLANE] = { src->y_stride, src->uv_stride,
                                        src->uv_stride };
    for (i = 0; i < MAX_MB_PLANE; ++i) {
      struct macroblockd_plane *const pd = &xd->plane[i];
      setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col,
                       sf, pd->subsampling_x, pd->subsampling_y);
    }
  }
}
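
/* Typical decode-side call order, as a rough sketch only: cm, xd, ref_buf
 * and bsize are assumed to come from the caller, and ref_buf is presumed to
 * expose the reference's frame buffer and scale factors as ->buf and ->sf,
 * as the vp9 decoder's RefBuffer does. Destination planes are pointed at
 * the current frame, prediction planes at the reference, then the
 * predictors are built:
 *
 *   vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
 *   vp9_setup_pre_planes(xd, 0, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
 *   vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
 */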