/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
// The _vert_* tables are like the ordinary tables above, but describe the // order we visit square blocks when doing a PARTITION_VERT_A or // PARTITION_VERT_B. This is the same order as normal except for on the last // split where we go vertically (TL, BL, TR, BR). We treat the rectangular block // as a pair of squares, which means that these tables work correctly for both // mixed vertical partition types. // // There are tables for each of the square sizes. Vertical rectangles (like // BLOCK_16X32) use their respective "non-vert" table staticconst uint8_t *const has_tr_vert_tables[BLOCK_SIZES] = { // 4X4
NULL, // 4X8, 8X4, 8X8
has_tr_4x8, NULL, has_tr_vert_8x8, // 8X16, 16X8, 16X16
has_tr_8x16, NULL, has_tr_vert_16x16, // 16X32, 32X16, 32X32
has_tr_16x32, NULL, has_tr_vert_32x32, // 32X64, 64X32, 64X64
has_tr_32x64, NULL, has_tr_vert_64x64, // 64x128, 128x64, 128x128
has_tr_64x128, NULL, has_tr_128x128
};
staticconst uint8_t *get_has_tr_table(PARTITION_TYPE partition,
BLOCK_SIZE bsize) { const uint8_t *ret = NULL; // If this is a mixed vertical partition, look up bsize in orders_vert. if (partition == PARTITION_VERT_A || partition == PARTITION_VERT_B) {
assert(bsize < BLOCK_SIZES);
ret = has_tr_vert_tables[bsize];
} else {
ret = has_tr_tables[bsize];
}
assert(ret); return ret;
}
// Returns 1 if the top-right reference samples of the transform block at
// (row_off, col_off) inside the current prediction block are available.
//
// Fixes: fused tokens `staticint`/`constint`, and several local declarations
// (plane_bw_unit, top_right_count_unit, blk_*_in_sb, ...) that were dropped
// from the extracted text; restored from the upstream libaom implementation.
static int has_top_right(BLOCK_SIZE sb_size, BLOCK_SIZE bsize, int mi_row,
                         int mi_col, int top_available, int right_available,
                         PARTITION_TYPE partition, TX_SIZE txsz, int row_off,
                         int col_off, int ss_x, int ss_y) {
  if (!top_available || !right_available) return 0;

  // NOTE(review): these declarations were missing from the extracted text and
  // are restored from the upstream implementation — verify against libaom.
  const int bw_unit = mi_size_wide[bsize];
  const int plane_bw_unit = AOMMAX(bw_unit >> ss_x, 1);
  const int top_right_count_unit = tx_size_wide_unit[txsz];

  if (row_off > 0) {  // Just need to check if enough pixels on the right.
    if (block_size_wide[bsize] > block_size_wide[BLOCK_64X64]) {
      // Special case: For 128x128 blocks, the transform unit whose
      // top-right corner is at the center of the block does in fact have
      // pixels available at its top-right corner.
      if (row_off == mi_size_high[BLOCK_64X64] >> ss_y &&
          col_off + top_right_count_unit == mi_size_wide[BLOCK_64X64] >> ss_x) {
        return 1;
      }
      const int plane_bw_unit_64 = mi_size_wide[BLOCK_64X64] >> ss_x;
      const int col_off_64 = col_off % plane_bw_unit_64;
      return col_off_64 + top_right_count_unit < plane_bw_unit_64;
    }
    return col_off + top_right_count_unit < plane_bw_unit;
  } else {
    // All top-right pixels are in the block above, which is already available.
    if (col_off + top_right_count_unit < plane_bw_unit) return 1;

    // NOTE(review): restored from upstream (missing in the extracted text).
    const int bw_in_mi_log2 = mi_size_wide_log2[bsize];
    const int bh_in_mi_log2 = mi_size_high_log2[bsize];
    const int sb_mi_size = mi_size_high[sb_size];
    const int blk_row_in_sb = (mi_row & (sb_mi_size - 1)) >> bh_in_mi_log2;
    const int blk_col_in_sb = (mi_col & (sb_mi_size - 1)) >> bw_in_mi_log2;

    // Top row of superblock: so top-right pixels are in the top and/or
    // top-right superblocks, both of which are already available.
    if (blk_row_in_sb == 0) return 1;

    // Rightmost column of superblock (and not the top row): so top-right
    // pixels fall in the right superblock, which is not available yet.
    if (((blk_col_in_sb + 1) << bw_in_mi_log2) >= sb_mi_size) {
      return 0;
    }

    // General case (neither top row nor rightmost column): check if the
    // top-right block is coded before the current block.
    const int this_blk_index =
        ((blk_row_in_sb + 0) << (MAX_MIB_SIZE_LOG2 - bw_in_mi_log2)) +
        blk_col_in_sb + 0;
    const int idx1 = this_blk_index / 8;
    const int idx2 = this_blk_index % 8;
    const uint8_t *has_tr_table = get_has_tr_table(partition, bsize);
    return (has_tr_table[idx1] >> idx2) & 1;
  }
}
// The _vert_* tables are like the ordinary tables above, but describe the // order we visit square blocks when doing a PARTITION_VERT_A or // PARTITION_VERT_B. This is the same order as normal except for on the last // split where we go vertically (TL, BL, TR, BR). We treat the rectangular block // as a pair of squares, which means that these tables work correctly for both // mixed vertical partition types. // // There are tables for each of the square sizes. Vertical rectangles (like // BLOCK_16X32) use their respective "non-vert" table staticconst uint8_t *const has_bl_vert_tables[BLOCK_SIZES] = { // 4X4
NULL, // 4X8, 8X4, 8X8
has_bl_4x8, NULL, has_bl_vert_8x8, // 8X16, 16X8, 16X16
has_bl_8x16, NULL, has_bl_vert_16x16, // 16X32, 32X16, 32X32
has_bl_16x32, NULL, has_bl_vert_32x32, // 32X64, 64X32, 64X64
has_bl_32x64, NULL, has_bl_vert_64x64, // 64x128, 128x64, 128x128
has_bl_64x128, NULL, has_bl_128x128
};
staticconst uint8_t *get_has_bl_table(PARTITION_TYPE partition,
BLOCK_SIZE bsize) { const uint8_t *ret = NULL; // If this is a mixed vertical partition, look up bsize in orders_vert. if (partition == PARTITION_VERT_A || partition == PARTITION_VERT_B) {
assert(bsize < BLOCK_SIZES);
ret = has_bl_vert_tables[bsize];
} else {
ret = has_bl_tables[bsize];
}
assert(ret); return ret;
}
// Returns 1 if the bottom-left reference samples of the transform block at
// (row_off, col_off) inside the current prediction block are available.
//
// Fixes: fused tokens `staticint`/`constint`, and the block-position local
// declarations (blk_*_in_sb, sb_mi_size, ...) that were dropped from the
// extracted text; restored from the upstream libaom implementation.
static int has_bottom_left(BLOCK_SIZE sb_size, BLOCK_SIZE bsize, int mi_row,
                           int mi_col, int bottom_available, int left_available,
                           PARTITION_TYPE partition, TX_SIZE txsz, int row_off,
                           int col_off, int ss_x, int ss_y) {
  if (!bottom_available || !left_available) return 0;

  // Special case for 128x* blocks, when col_off is half the block width.
  // This is needed because 128x* superblocks are divided into 64x* blocks in
  // raster order.
  if (block_size_wide[bsize] > block_size_wide[BLOCK_64X64] && col_off > 0) {
    const int plane_bw_unit_64 = mi_size_wide[BLOCK_64X64] >> ss_x;
    const int col_off_64 = col_off % plane_bw_unit_64;
    if (col_off_64 == 0) {
      // We are at the left edge of top-right or bottom-right 64x* block.
      const int plane_bh_unit_64 = mi_size_high[BLOCK_64X64] >> ss_y;
      const int row_off_64 = row_off % plane_bh_unit_64;
      const int plane_bh_unit =
          AOMMIN(mi_size_high[bsize] >> ss_y, plane_bh_unit_64);
      // Check if all bottom-left pixels are in the left 64x* block (which is
      // already coded).
      return row_off_64 + tx_size_high_unit[txsz] < plane_bh_unit;
    }
  }

  if (col_off > 0) {
    // Bottom-left pixels are in the bottom-left block, which is not available.
    return 0;
  } else {
    const int bh_unit = mi_size_high[bsize];
    const int plane_bh_unit = AOMMAX(bh_unit >> ss_y, 1);
    const int bottom_left_count_unit = tx_size_high_unit[txsz];

    // All bottom-left pixels are in the left block, which is already
    // available.
    if (row_off + bottom_left_count_unit < plane_bh_unit) return 1;

    // NOTE(review): restored from upstream (missing in the extracted text).
    const int bw_in_mi_log2 = mi_size_wide_log2[bsize];
    const int bh_in_mi_log2 = mi_size_high_log2[bsize];
    const int sb_mi_size = mi_size_high[sb_size];
    const int blk_row_in_sb = (mi_row & (sb_mi_size - 1)) >> bh_in_mi_log2;
    const int blk_col_in_sb = (mi_col & (sb_mi_size - 1)) >> bw_in_mi_log2;

    // Leftmost column of superblock: so bottom-left pixels maybe in the left
    // and/or bottom-left superblocks. But only the left superblock is
    // available, so check if all required pixels fall in that superblock.
    if (blk_col_in_sb == 0) {
      const int blk_start_row_off =
          blk_row_in_sb << (bh_in_mi_log2 + MI_SIZE_LOG2 - MI_SIZE_LOG2) >>
          ss_y;
      const int row_off_in_sb = blk_start_row_off + row_off;
      const int sb_height_unit = sb_mi_size >> ss_y;
      return row_off_in_sb + bottom_left_count_unit < sb_height_unit;
    }

    // Bottom row of superblock (and not the leftmost column): so bottom-left
    // pixels fall in the bottom superblock, which is not available yet.
    if (((blk_row_in_sb + 1) << bh_in_mi_log2) >= sb_mi_size) return 0;

    // General case (neither leftmost column nor bottom row): check if the
    // bottom-left block is coded before the current block.
    const int this_blk_index =
        ((blk_row_in_sb + 0) << (MAX_MIB_SIZE_LOG2 - bw_in_mi_log2)) +
        blk_col_in_sb + 0;
    const int idx1 = this_blk_index / 8;
    const int idx2 = this_blk_index % 8;
    const uint8_t *has_bl_table = get_has_bl_table(partition, bsize);
    return (has_bl_table[idx1] >> idx2) & 1;
  }
}
// ROUND_POWER_OF_TWO is normally provided by aom_dsp/aom_dsp_common.h; keep a
// guarded fallback so this unit stays well-formed if compiled standalone.
#ifndef ROUND_POWER_OF_TWO
#define ROUND_POWER_OF_TWO(value, n) (((value) + (((1 << (n)) >> 1))) >> (n))
#endif

// Directional prediction, zone 2: 90 < angle < 180.
// For each output pixel, project along the prediction angle; pixels whose
// projection lands on the above row interpolate between two above samples,
// the rest interpolate between two left-column samples.
//
// NOTE(review): the function header and setup code were lost in extraction
// (only the main loop survived); restored from the upstream libaom
// implementation — verify against libaom.
void av1_dr_prediction_z2_c(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
                            const uint8_t *above, const uint8_t *left,
                            int upsample_above, int upsample_left, int dx,
                            int dy) {
  assert(dx > 0);
  assert(dy > 0);

  const int min_base_x = -(1 << upsample_above);
  const int min_base_y = -(1 << upsample_left);
  (void)min_base_y;
  const int frac_bits_x = 6 - upsample_above;
  const int frac_bits_y = 6 - upsample_left;

  for (int r = 0; r < bh; ++r) {
    for (int c = 0; c < bw; ++c) {
      int val;
      int y = r + 1;
      int x = (c << 6) - y * dx;
      const int base_x = x >> frac_bits_x;
      if (base_x >= min_base_x) {
        // Projection hits the above row: 5-bit linear interpolation.
        const int shift = ((x * (1 << upsample_above)) & 0x3F) >> 1;
        val = above[base_x] * (32 - shift) + above[base_x + 1] * shift;
        val = ROUND_POWER_OF_TWO(val, 5);
      } else {
        // Projection falls left of the block: use the left column instead.
        x = c + 1;
        y = (r << 6) - x * dy;
        const int base_y = y >> frac_bits_y;
        assert(base_y >= min_base_y);
        const int shift = ((y * (1 << upsample_left)) & 0x3F) >> 1;
        val = left[base_y] * (32 - shift) + left[base_y + 1] * shift;
        val = ROUND_POWER_OF_TWO(val, 5);
      }
      dst[c] = val;
    }
    dst += stride;
  }
}
// ROUND_POWER_OF_TWO is normally provided by aom_dsp/aom_dsp_common.h; keep a
// guarded fallback so this unit stays well-formed if compiled standalone.
#ifndef ROUND_POWER_OF_TWO
#define ROUND_POWER_OF_TWO(value, n) (((value) + (((1 << (n)) >> 1))) >> (n))
#endif

// Directional prediction, zone 3: 180 < angle < 270.
// Each column projects onto the left edge; samples are 5-bit linearly
// interpolated between two left-column neighbors. Once the projection runs
// past the last available left sample, that sample is replicated.
//
// Fix: fused `constint` tokens (syntax errors) split into `const int`.
void av1_dr_prediction_z3_c(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
                            const uint8_t *above, const uint8_t *left,
                            int upsample_left, int dx, int dy) {
  int r, c, y, base, shift, val;

  (void)above;
  (void)dx;
  assert(dx == 1);
  assert(dy > 0);

  const int max_base_y = (bw + bh - 1) << upsample_left;
  const int frac_bits = 6 - upsample_left;
  const int base_inc = 1 << upsample_left;
  y = dy;
  for (c = 0; c < bw; ++c, y += dy) {
    base = y >> frac_bits;
    shift = ((y << upsample_left) & 0x3F) >> 1;

    for (r = 0; r < bh; ++r, base += base_inc) {
      if (base < max_base_y) {
        val = left[base] * (32 - shift) + left[base + 1] * shift;
        dst[r * stride + c] = ROUND_POWER_OF_TWO(val, 5);
      } else {
        // Ran off the end of the left edge: replicate the last sample.
        for (; r < bh; ++r) dst[r * stride + c] = left[max_base_y];
        break;
      }
    }
  }
}
// ROUND_POWER_OF_TWO is normally provided by aom_dsp/aom_dsp_common.h; keep a
// guarded fallback so this unit stays well-formed if compiled standalone.
#ifndef ROUND_POWER_OF_TWO
#define ROUND_POWER_OF_TWO(value, n) (((value) + (((1 << (n)) >> 1))) >> (n))
#endif

// High-bitdepth directional prediction, zone 2: 90 < angle < 180.
// Identical in structure to av1_dr_prediction_z2_c but operating on uint16_t
// samples; interpolation of in-range samples stays in range, so bd is unused.
//
// NOTE(review): the function header and setup code were lost in extraction
// (only the main loop survived); restored from the upstream libaom
// implementation — verify against libaom.
void av1_highbd_dr_prediction_z2_c(uint16_t *dst, ptrdiff_t stride, int bw,
                                   int bh, const uint16_t *above,
                                   const uint16_t *left, int upsample_above,
                                   int upsample_left, int dx, int dy, int bd) {
  (void)bd;
  assert(dx > 0);
  assert(dy > 0);

  const int min_base_x = -(1 << upsample_above);
  const int min_base_y = -(1 << upsample_left);
  (void)min_base_y;
  const int frac_bits_x = 6 - upsample_above;
  const int frac_bits_y = 6 - upsample_left;

  for (int r = 0; r < bh; ++r) {
    for (int c = 0; c < bw; ++c) {
      int val;
      int y = r + 1;
      int x = (c << 6) - y * dx;
      const int base_x = x >> frac_bits_x;
      if (base_x >= min_base_x) {
        // Projection hits the above row: 5-bit linear interpolation.
        const int shift = ((x * (1 << upsample_above)) & 0x3F) >> 1;
        val = above[base_x] * (32 - shift) + above[base_x + 1] * shift;
        val = ROUND_POWER_OF_TWO(val, 5);
      } else {
        // Projection falls left of the block: use the left column instead.
        x = c + 1;
        y = (r << 6) - x * dy;
        const int base_y = y >> frac_bits_y;
        assert(base_y >= min_base_y);
        const int shift = ((y * (1 << upsample_left)) & 0x3F) >> 1;
        val = left[base_y] * (32 - shift) + left[base_y + 1] * shift;
        val = ROUND_POWER_OF_TWO(val, 5);
      }
      dst[c] = val;
    }
    dst += stride;
  }
}
// ROUND_POWER_OF_TWO is normally provided by aom_dsp/aom_dsp_common.h; keep a
// guarded fallback so this unit stays well-formed if compiled standalone.
#ifndef ROUND_POWER_OF_TWO
#define ROUND_POWER_OF_TWO(value, n) (((value) + (((1 << (n)) >> 1))) >> (n))
#endif

// High-bitdepth directional prediction, zone 3: 180 < angle < 270.
// Same algorithm as av1_dr_prediction_z3_c, but on uint16_t samples; the
// interpolation of in-range samples stays in range, so bd is unused.
//
// NOTE(review): only the signature and local declarations of this function
// survived extraction; the body is restored from the upstream libaom
// implementation — verify against libaom.
void av1_highbd_dr_prediction_z3_c(uint16_t *dst, ptrdiff_t stride, int bw,
                                   int bh, const uint16_t *above,
                                   const uint16_t *left, int upsample_left,
                                   int dx, int dy, int bd) {
  int r, c, y, base, shift, val;

  (void)above;
  (void)dx;
  (void)bd;
  assert(dx == 1);
  assert(dy > 0);

  const int max_base_y = (bw + bh - 1) << upsample_left;
  const int frac_bits = 6 - upsample_left;
  const int base_inc = 1 << upsample_left;
  y = dy;
  for (c = 0; c < bw; ++c, y += dy) {
    base = y >> frac_bits;
    shift = ((y << upsample_left) & 0x3F) >> 1;

    for (r = 0; r < bh; ++r, base += base_inc) {
      if (base < max_base_y) {
        val = left[base] * (32 - shift) + left[base + 1] * shift;
        dst[r * stride + c] = ROUND_POWER_OF_TWO(val, 5);
      } else {
        // Ran off the end of the left edge: replicate the last sample.
        for (; r < bh; ++r) dst[r * stride + c] = left[max_base_y];
        break;
      }
    }
  }
}
staticint is_smooth(const MB_MODE_INFO *mbmi, int plane) { if (plane == 0) { const PREDICTION_MODE mode = mbmi->mode; return (mode == SMOOTH_PRED || mode == SMOOTH_V_PRED ||
mode == SMOOTH_H_PRED);
} else { // uv_mode is not set for inter blocks, so need to explicitly // detect that case. if (is_inter_block(mbmi)) return 0;
uint8_t in[MAX_UPSAMPLE_SZ + 3]; // copy p[-1..(sz-1)] and extend first and last samples
in[0] = p[-1];
in[1] = p[-1]; for (int i = 0; i < sz; i++) {
in[i + 2] = p[i];
}
in[sz + 2] = p[sz - 1];
// interpolate half-sample edge positions
p[-2] = in[0]; for (int i = 0; i < sz; i++) { int s = -in[i] + (9 * in[i + 1]) + (9 * in[i + 2]) - in[i + 3];
s = clip_pixel((s + 8) >> 4);
p[2 * i - 1] = s;
p[2 * i] = in[i + 2];
}
}
// NOTE(review): The region below is corrupted by extraction. The body of
// build_directional_and_filter_intra_predictors() is truncated (most of its
// logic is missing), and the tail of a separate edge-corner filter helper —
// the code writing the filtered corner sample to p_above[-1] / p_left[-1] —
// has been fused onto it. The fused `staticvoid`/`constint` tokens are also
// extraction artifacts. Do not edit in place; restore this region from the
// upstream libaom reconintra.c before compiling.
staticvoid build_directional_and_filter_intra_predictors( const uint8_t *ref, int ref_stride, uint8_t *dst, int dst_stride,
PREDICTION_MODE mode, int p_angle, FILTER_INTRA_MODE filter_intra_mode,
TX_SIZE tx_size, int disable_edge_filter, int n_top_px, int n_topright_px, int n_left_px, int n_bottomleft_px, int intra_edge_filter_type) { int i; const uint8_t *above_ref = ref - ref_stride; const uint8_t *left_ref = ref - 1;
DECLARE_ALIGNED(16, uint8_t, left_data[NUM_INTRA_NEIGHBOUR_PIXELS]);
DECLARE_ALIGNED(16, uint8_t, above_data[NUM_INTRA_NEIGHBOUR_PIXELS]);
uint8_t *const above_row = above_data + 16;
uint8_t *const left_col = left_data + 16; constint txwpx = tx_size_wide[tx_size]; constint txhpx = tx_size_high[tx_size]; int need_left = extend_modes[mode] & NEED_LEFT; int need_above = extend_modes[mode] & NEED_ABOVE; int need_above_left = extend_modes[mode] & NEED_ABOVELEFT; constint is_dr_mode = av1_is_directional_mode(mode); constint use_filter_intra = filter_intra_mode != FILTER_INTRA_MODES;
assert(use_filter_intra || is_dr_mode); // The left_data, above_data buffers must be zeroed to fix some intermittent // valgrind errors. Uninitialized reads in intra pred modules (e.g. width = 4 // path in av1_dr_prediction_z1_avx2()) from left_data, above_data are seen to // be the potential reason for this issue.
memset(left_data, 129, NUM_INTRA_NEIGHBOUR_PIXELS);
memset(above_data, 127, NUM_INTRA_NEIGHBOUR_PIXELS);
// The default values if ref pixels are not available: // 128 127 127 .. 127 127 127 127 127 127 // 129 A B .. Y Z // 129 C D .. W X // 129 E F .. U V // 129 G H .. S T T T T T // ..
// NOTE(review): the lines below belong to a different function (an intra
// edge corner filter applying a (5, 6, 5)/16 kernel); its header was lost.
int s = (p_left[0] * kernel[0]) + (p_above[-1] * kernel[1]) +
(p_above[0] * kernel[2]);
s = (s + 8) >> 4;
p_above[-1] = s;
p_left[-1] = s;
}
// Upsamples the sz-sample high-bitdepth intra edge p[-1..sz-1] in place by a
// factor of two. Half-sample positions are interpolated with the 4-tap
// (-1, 9, 9, -1) filter, rounded, and clipped to the bit depth; full-sample
// positions are copied through. Writes p[-2..2*sz-2].
void av1_highbd_upsample_intra_edge_c(uint16_t *p, int sz, int bd) {
  // interpolate half-sample positions
  assert(sz <= MAX_UPSAMPLE_SZ);

  // Stage the source samples with the first and last ones duplicated so the
  // 4-tap filter never reads outside the edge.
  uint16_t src[MAX_UPSAMPLE_SZ + 3];
  src[0] = p[-1];
  src[1] = p[-1];
  for (int k = 0; k < sz; k++) src[k + 2] = p[k];
  src[sz + 2] = p[sz - 1];

  // Write the upsampled edge: even outputs are pass-through samples, odd
  // outputs are the filtered half-sample values.
  p[-2] = src[0];
  for (int k = 0; k < sz; k++) {
    int v = -src[k] + (9 * src[k + 1]) + (9 * src[k + 2]) - src[k + 3];
    v = (v + 8) >> 4;
    v = clip_pixel_highbd(v, bd);
    p[2 * k - 1] = v;
    p[2 * k] = src[k + 2];
  }
}
// NOTE(review): The function below is truncated by extraction — only its
// signature, local declarations, and default-value initialization survived;
// the rest of its body is missing. The fused `staticvoid`/`constint` tokens
// are also extraction artifacts. Restore from the upstream libaom
// reconintra.c before compiling.
staticvoid highbd_build_directional_and_filter_intra_predictors( const uint8_t *ref8, int ref_stride, uint8_t *dst8, int dst_stride,
PREDICTION_MODE mode, int p_angle, FILTER_INTRA_MODE filter_intra_mode,
TX_SIZE tx_size, int disable_edge_filter, int n_top_px, int n_topright_px, int n_left_px, int n_bottomleft_px, int intra_edge_filter_type, int bit_depth) { int i;
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); const uint16_t *const ref = CONVERT_TO_SHORTPTR(ref8);
DECLARE_ALIGNED(16, uint16_t, left_data[NUM_INTRA_NEIGHBOUR_PIXELS]);
DECLARE_ALIGNED(16, uint16_t, above_data[NUM_INTRA_NEIGHBOUR_PIXELS]);
uint16_t *const above_row = above_data + 16;
uint16_t *const left_col = left_data + 16; constint txwpx = tx_size_wide[tx_size]; constint txhpx = tx_size_high[tx_size]; int need_left = extend_modes[mode] & NEED_LEFT; int need_above = extend_modes[mode] & NEED_ABOVE; int need_above_left = extend_modes[mode] & NEED_ABOVELEFT; const uint16_t *above_ref = ref - ref_stride; const uint16_t *left_ref = ref - 1; constint is_dr_mode = av1_is_directional_mode(mode); constint use_filter_intra = filter_intra_mode != FILTER_INTRA_MODES;
assert(use_filter_intra || is_dr_mode); constint base = 128 << (bit_depth - 8); // The left_data, above_data buffers must be zeroed to fix some intermittent // valgrind errors. Uninitialized reads in intra pred modules (e.g. width = 4 // path in av1_highbd_dr_prediction_z2_avx2()) from left_data, above_data are // seen to be the potential reason for this issue.
aom_memset16(left_data, base + 1, NUM_INTRA_NEIGHBOUR_PIXELS);
aom_memset16(above_data, base - 1, NUM_INTRA_NEIGHBOUR_PIXELS);
// The default values if ref pixels are not available: // base base-1 base-1 .. base-1 base-1 base-1 base-1 base-1 base-1 // base+1 A B .. Y Z // base+1 C D .. W X // base+1 E F .. U V // base+1 G H .. S T T T T T
// NOTE(review): The function below is truncated by extraction — only its
// signature and the first local declarations survived, and an unrelated
// fragment (the xr/yd frame-edge distance computation, which references an
// `xd` macroblockd not declared here) has been fused onto it. Restore from
// the upstream libaom reconintra.c before compiling.
// For HBD encode/decode, this function generates the pred data of a given // block for non-directional intra prediction modes (i.e., DC, SMOOTH, SMOOTH_H, // SMOOTH_V and PAETH). staticvoid highbd_build_non_directional_intra_predictors( const uint8_t *ref8, int ref_stride, uint8_t *dst8, int dst_stride,
PREDICTION_MODE mode, TX_SIZE tx_size, int n_top_px, int n_left_px, int bit_depth) { int i = 0;
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); const uint16_t *const ref = CONVERT_TO_SHORTPTR(ref8); constint txwpx = tx_size_wide[tx_size]; constint txhpx = tx_size_high[tx_size]; int need_left = extend_modes[mode] & NEED_LEFT; int need_above = extend_modes[mode] & NEED_ABOVE; int need_above_left = extend_modes[mode] & NEED_ABOVELEFT; const uint16_t *above_ref = ref - ref_stride; const uint16_t *left_ref = ref - 1; constint base = 128 << (bit_depth - 8);
// NOTE(review): unrelated fragment from a different function follows.
// Distance between the right edge of this prediction block to // the frame right edge constint xr = (xd->mb_to_right_edge >> (3 + ss_x)) + wpx - x - txwpx; // Distance between the bottom edge of this prediction block to // the frame bottom edge constint yd = (xd->mb_to_bottom_edge >> (3 + ss_y)) + hpx - y - txhpx; constint use_filter_intra = filter_intra_mode != FILTER_INTRA_MODES; constint is_dr_mode = av1_is_directional_mode(mode);
// The computations in this function, as well as in build_intra_predictors(), // are generalized for all intra modes. Some of these operations are not // required since non-directional intra modes (i.e., DC, SMOOTH, SMOOTH_H, // SMOOTH_V, and PAETH) specifically require left and top neighbors. Hence, a // separate function build_non_directional_intra_predictors() is introduced // for these modes to avoid redundant computations while generating pred data.
/* NOTE(review): Stray non-code text (a German website disclaimer) was
 * appended to this file during extraction; it is unrelated to the source and
 * is preserved here only as a comment so the file remains valid C:
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell."
 */