/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
// Initialize the segmentation index as 0.
mi->segment_id = 0;
// Skip the rest if AQ mode is disabled. if (!seg->enabled) return;
switch (aq_mode) { case CYCLIC_REFRESH_AQ:
mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); break; #if !CONFIG_REALTIME_ONLY case VARIANCE_AQ: if (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
cpi->force_update_segmentation ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { int min_energy; int max_energy; // Get sub block energy range if (bsize >= BLOCK_32X32) {
vp9_get_sub_block_energy(cpi, x, mi_row, mi_col, bsize, &min_energy,
&max_energy);
} else {
min_energy = bsize <= BLOCK_16X16 ? x->mb_energy
: vp9_block_energy(cpi, x, bsize);
}
mi->segment_id = vp9_vaq_segment_id(min_energy);
} else {
mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
} break; case EQUATOR360_AQ: if (cm->frame_type == KEY_FRAME || cpi->force_update_segmentation)
mi->segment_id = vp9_360aq_segment_id(mi_row, cm->mi_rows); else
mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); break; #endif case LOOKAHEAD_AQ:
mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); break; case PSNR_AQ: mi->segment_id = segment_index; break; case PERCEPTUAL_AQ: mi->segment_id = x->segment_id; break; default: // NO_AQ or PSNR_AQ break;
}
// Set segment index if ROI map or active_map is enabled. if (cpi->roi.enabled || cpi->active_map.enabled)
mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
vp9_init_plane_quantizers(cpi, x);
}
// Lighter version of set_offsets that only sets the mode info // pointers. staticINLINEvoid set_mode_info_offsets(VP9_COMMON *const cm,
MACROBLOCK *const x,
MACROBLOCKD *const xd, int mi_row, int mi_col) { constint idx_str = xd->mi_stride * mi_row + mi_col;
xd->mi = cm->mi_grid_visible + idx_str;
xd->mi[0] = cm->mi + idx_str;
x->mbmi_ext = x->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
}
// Set up destination pointers.
vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
// Set up limit values for MV components. // Mv beyond the range do not produce new/different prediction block.
mv_limits->row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
mv_limits->col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
mv_limits->row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
mv_limits->col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;
// Set up distance of MB to edge of frame in 1/8th pel units.
assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
cm->mi_cols);
// Set up source buffers.
vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
// This struct is used for computing variance in choose_partitioning(), where
// the max number of samples within a superblock is 16x16 (with 4x4 avg). Even
// in high bitdepth, uint32_t is enough for sum_square_error (2^12 * 2^12 * 16
// * 16 = 2^32).
typedef struct {
  uint32_t sum_square_error;  // Accumulated sum of squared differences.
  int32_t sum_error;          // Accumulated signed sum of differences.
  int log2_count;             // log2 of the number of accumulated samples.
  int variance;               // Variance derived from the sums above.
} Var;
typedefstruct {
Var none;
Var horz[2];
Var vert[2];
} partition_variance;
typedefstruct {
partition_variance part_variances;
Var split[4];
} v4x4;
// For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if // variance is below threshold, otherwise split will be selected. // No check for vert/horiz split as too few samples for variance. if (bsize == bsize_min) { // Variance already computed to set the force_split. if (frame_is_intra_only(cm)) get_variance(&vt.part_variances->none); if (mi_col + block_width / 2 < cm->mi_cols &&
mi_row + block_height / 2 < cm->mi_rows &&
vt.part_variances->none.variance < threshold) {
set_block_size(cpi, x, xd, mi_row, mi_col, bsize); return 1;
} return 0;
} elseif (bsize > bsize_min) { // Variance already computed to set the force_split. if (frame_is_intra_only(cm)) get_variance(&vt.part_variances->none); // For key frame: take split for bsize above 32X32 or very high variance. if (frame_is_intra_only(cm) &&
(bsize > BLOCK_32X32 ||
vt.part_variances->none.variance > (threshold << 4))) { return 0;
} // If variance is low, take the bsize (no split). if (mi_col + block_width / 2 < cm->mi_cols &&
mi_row + block_height / 2 < cm->mi_rows &&
vt.part_variances->none.variance < threshold) {
set_block_size(cpi, x, xd, mi_row, mi_col, bsize); return 1;
}
// Set the variance split thresholds for following the block sizes: // 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16, // 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is // currently only used on key frame. staticvoid set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q, int content_state) {
VP9_COMMON *const cm = &cpi->common; constint is_key_frame = frame_is_intra_only(cm); constint threshold_multiplier =
is_key_frame ? 20 : cpi->sf.variance_part_thresh_mult;
int64_t threshold_base =
(int64_t)(threshold_multiplier * cpi->y_dequant[q][1]);
// Compute the minmax over the 8x8 subblocks.
// Returns the spread between the largest and smallest (max - min) pixel
// range found among the four 8x8 subblocks of the 16x16 block at
// (x16_idx, y16_idx); subblocks outside the visible frame are skipped.
static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
                              int dp, int x16_idx, int y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                              int highbd_flag,
#endif
                              int pixels_wide, int pixels_high) {
  int k;
  int minmax_max = 0;
  int minmax_min = 255;
  // Loop over the 4 8x8 subblocks.
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    int min = 0;
    int max = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                              d + y8_idx * dp + x8_idx, dp, &min, &max);
      } else {
        vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx,
                       dp, &min, &max);
      }
#else
      vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx, dp,
                     &min, &max);
#endif
      // Track the widest and narrowest (max - min) range seen so far.
      if ((max - min) > minmax_max) minmax_max = (max - min);
      if ((max - min) < minmax_min) minmax_min = (max - min);
    }
  }
  return (minmax_max - minmax_min);
}
// Fill the four leaf variance nodes of an 8x8 block using 4x4 averages of
// source (s) and prediction (d). On key frames d is not read; a neutral
// value of 128 is used for the prediction average instead.
static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x8_idx, int y8_idx, v8x8 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide, int pixels_high,
                                 int is_key_frame) {
  int k;
  // Loop over the 4 4x4 subblocks of the 8x8 block.
  for (k = 0; k < 4; k++) {
    int x4_idx = x8_idx + ((k & 1) << 2);
    int y4_idx = y8_idx + ((k >> 1) << 2);
    unsigned int sse = 0;
    int sum = 0;
    if (x4_idx < pixels_wide && y4_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;  // Neutral prediction average for key frames.
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      } else {
        s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      }
#else
      s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
      if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}
// Fill the four leaf variance nodes of a 16x16 block using 8x8 averages of
// source (s) and prediction (d). On key frames d is not read; a neutral
// value of 128 is used for the prediction average instead.
static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x16_idx, int y16_idx, v16x16 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide, int pixels_high,
                                 int is_key_frame) {
  int k;
  // Loop over the 4 8x8 subblocks of the 16x16 block.
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    unsigned int sse = 0;
    int sum = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;  // Neutral prediction average for key frames.
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      } else {
        s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      }
#else
      s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
      if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}
// Check if most of the superblock is skin content, and if so, force split to // 32x32, and set x->sb_is_skin for use in mode selection. staticint skin_sb_split(VP9_COMP *cpi, constint low_res, int mi_row, int mi_col, int *force_split) {
VP9_COMMON *const cm = &cpi->common; #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) return 0; #endif // Avoid checking superblocks on/near boundary and avoid low resolutions. // Note superblock may still pick 64X64 if y_sad is very small // (i.e., y_sad < cpi->vbp_threshold_sad) below. For now leave this as is. if (!low_res && (mi_col >= 8 && mi_col + 8 < cm->mi_cols && mi_row >= 8 &&
mi_row + 8 < cm->mi_rows)) { int num_16x16_skin = 0; int num_16x16_nonskin = 0; constint block_index = mi_row * cm->mi_cols + mi_col; constint bw = num_8x8_blocks_wide_lookup[BLOCK_64X64]; constint bh = num_8x8_blocks_high_lookup[BLOCK_64X64]; constint xmis = VPXMIN(cm->mi_cols - mi_col, bw); constint ymis = VPXMIN(cm->mi_rows - mi_row, bh); // Loop through the 16x16 sub-blocks. int i, j; for (i = 0; i < ymis; i += 2) { for (j = 0; j < xmis; j += 2) { int bl_index = block_index + i * cm->mi_cols + j; int is_skin = cpi->skin_map[bl_index];
num_16x16_skin += is_skin;
num_16x16_nonskin += (1 - is_skin); if (num_16x16_nonskin > 3) { // Exit loop if at least 4 of the 16x16 blocks are not skin.
i = ymis; break;
}
}
} if (num_16x16_skin > 12) {
*force_split = 1; return 1;
}
} return 0;
}
staticvoid set_low_temp_var_flag(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
v64x64 *vt, int64_t thresholds[],
MV_REFERENCE_FRAME ref_frame_partition, int mi_col, int mi_row) { int i, j;
VP9_COMMON *const cm = &cpi->common; constint mv_thr = cm->width > 640 ? 8 : 4; // Check temporal variance for bsize >= 16x16, if LAST_FRAME was selected and // int_pro mv is small. If the temporal variance is small set the flag // variance_low for the block. The variance threshold can be adjusted, the // higher the more aggressive. if (ref_frame_partition == LAST_FRAME &&
(cpi->sf.short_circuit_low_temp_var == 1 ||
(xd->mi[0]->mv[0].as_mv.col < mv_thr &&
xd->mi[0]->mv[0].as_mv.col > -mv_thr &&
xd->mi[0]->mv[0].as_mv.row < mv_thr &&
xd->mi[0]->mv[0].as_mv.row > -mv_thr))) { if (xd->mi[0]->sb_type == BLOCK_64X64) { if ((vt->part_variances).none.variance < (thresholds[0] >> 1))
x->variance_low[0] = 1;
} elseif (xd->mi[0]->sb_type == BLOCK_64X32) { for (i = 0; i < 2; i++) { if (vt->part_variances.horz[i].variance < (thresholds[0] >> 2))
x->variance_low[i + 1] = 1;
}
} elseif (xd->mi[0]->sb_type == BLOCK_32X64) { for (i = 0; i < 2; i++) { if (vt->part_variances.vert[i].variance < (thresholds[0] >> 2))
x->variance_low[i + 3] = 1;
}
} else { for (i = 0; i < 4; i++) { constint idx[4][2] = { { 0, 0 }, { 0, 4 }, { 4, 0 }, { 4, 4 } }; constint idx_str =
cm->mi_stride * (mi_row + idx[i][0]) + mi_col + idx[i][1];
MODE_INFO **this_mi = cm->mi_grid_visible + idx_str;
// Try to reuse the previous frame's partition for this superblock.
// Returns 1 if the partition (and the cached variance_low flags) were
// copied, 0 if the caller must compute a fresh partitioning. The copy is
// gated on segment state, pending resizes, a per-superblock copy counter,
// and (for SVC) key-frame / reference-frame constraints.
static int copy_partitioning(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                             int mi_row, int mi_col, int segment_id,
                             int sb_offset) {
  int svc_copy_allowed = 1;
  int frames_since_key_thresh = 1;
  if (cpi->use_svc) {
    // For SVC, don't allow copy if base spatial layer is key frame, or if
    // frame is not a temporal enhancement layer frame.
    int layer = LAYER_IDS_TO_IDX(0, cpi->svc.temporal_layer_id,
                                 cpi->svc.number_temporal_layers);
    const LAYER_CONTEXT *lc = &cpi->svc.layer_context[layer];
    if (lc->is_key_frame || !cpi->svc.non_reference_frame) svc_copy_allowed = 0;
    frames_since_key_thresh = cpi->svc.number_spatial_layers << 1;
  }
  if (cpi->rc.frames_since_key > frames_since_key_thresh && svc_copy_allowed &&
      !cpi->resize_pending && segment_id == CR_SEGMENT_ID_BASE &&
      cpi->prev_segment_id[sb_offset] == CR_SEGMENT_ID_BASE &&
      cpi->copied_frame_cnt[sb_offset] < cpi->max_copied_frame) {
    if (cpi->prev_partition != NULL) {
      copy_partitioning_helper(cpi, x, xd, BLOCK_64X64, mi_row, mi_col);
      cpi->copied_frame_cnt[sb_offset] += 1;
      // 25 = number of variance_low entries cached per superblock.
      memcpy(x->variance_low, &(cpi->prev_variance_low[sb_offset * 25]),
             sizeof(x->variance_low));
      return 1;
    }
  }
  return 0;
}
// Set the partition for mi_col/row_high (current resolution) based on // the previous spatial layer (mi_col/row). Returns 0 if partition is set, // returns 1 if no scale partitioning is done. Return 1 means the variance // partitioning will be used. staticint scale_partitioning_svc(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
BLOCK_SIZE bsize, int mi_row, int mi_col, int mi_row_high, int mi_col_high) {
VP9_COMMON *const cm = &cpi->common;
SVC *const svc = &cpi->svc;
BLOCK_SIZE *prev_part = svc->prev_partition_svc; // Variables with _high are for higher resolution. int bsize_high = 0; int subsize_high = 0; constint bsl = b_width_log2_lookup[bsize]; constint bs = (1 << bsl) >> 2; constint has_rows = (mi_row_high + bs) < cm->mi_rows; constint has_cols = (mi_col_high + bs) < cm->mi_cols;
int start_pos;
BLOCK_SIZE bsize_low;
PARTITION_TYPE partition_high;
// If the lower layer frame is outside the boundary (this can happen for // odd size resolutions) then do not scale partitioning from the lower // layer. Do variance based partitioning instead (return 1). if (mi_row >= svc->mi_rows[svc->spatial_layer_id - 1] ||
mi_col >= svc->mi_cols[svc->spatial_layer_id - 1]) return 1;
// Do not scale partitioning from lower layers on the boundary. Do // variance based partitioning instead (return 1). if (!has_rows || !has_cols) return 1;
// For reference frames: return 1 (do variance-based partitioning) if the // superblock is not low source sad and lower-resoln bsize is below 32x32. if (!cpi->svc.non_reference_frame && !x->skip_low_source_sad &&
bsize_low < BLOCK_32X32) return 1;
// Scale up block size by 2x2. Force 64x64 for size larger than 32x32. if (bsize_low < BLOCK_32X32) {
bsize_high = bsize_low + 3;
} elseif (bsize_low >= BLOCK_32X32) {
bsize_high = BLOCK_64X64;
}
// Save this superblock's partition, segment id and variance_low flags so a
// later frame can reuse them via copy_partitioning().
static void update_prev_partition(VP9_COMP *cpi, MACROBLOCK *x, int segment_id,
                                  int mi_row, int mi_col, int sb_offset) {
  update_prev_partition_helper(cpi, BLOCK_64X64, mi_row, mi_col);
  cpi->prev_segment_id[sb_offset] = segment_id;
  // 25 = number of variance_low entries cached per superblock.
  memcpy(&(cpi->prev_variance_low[sb_offset * 25]), x->variance_low,
         sizeof(x->variance_low));
  // Reset the counter for copy partitioning.
  cpi->copied_frame_cnt[sb_offset] = 0;
}
// Set x->color_sensitivity for the two chroma planes by comparing each
// plane's SAD against a shifted luma SAD. Skipped entirely on key frames,
// and (for speed > 8) when luma SAD is already high on low-noise content.
static void chroma_check(VP9_COMP *cpi, MACROBLOCK *x, int bsize,
                         unsigned int y_sad, int is_key_frame,
                         int scene_change_detected) {
  int i;
  MACROBLOCKD *xd = &x->e_mbd;
  int shift = 2;
  if (is_key_frame) return;
  // For speed > 8, avoid the chroma check if y_sad is above threshold.
  if (cpi->oxcf.speed > 8) {
    if (y_sad > cpi->vbp_thresholds[1] &&
        (!cpi->noise_estimate.enabled ||
         vp9_noise_estimate_extract_level(&cpi->noise_estimate) < kMedium))
      return;
  }
  // Screen content after a scene change: require a much larger chroma SAD
  // before flagging color sensitivity.
  if (cpi->oxcf.content == VP9E_CONTENT_SCREEN && scene_change_detected)
    shift = 5;
  for (i = 1; i <= 2; ++i) {
    unsigned int uv_sad = UINT_MAX;
    struct macroblock_plane *p = &x->plane[i];
    struct macroblockd_plane *pd = &xd->plane[i];
    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
    if (bs != BLOCK_INVALID)
      uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, pd->dst.buf,
                                   pd->dst.stride);
    // TODO(marpan): Investigate if we should lower this threshold if
    // superblock is detected as skin.
    x->color_sensitivity[i - 1] = uv_sad > (y_sad >> shift);
  }
}
if (cpi->content_state_sb_fd != NULL) { if (tmp_sad < avg_source_sad_threshold2) { // Cap the increment to 255. if (cpi->content_state_sb_fd[sb_offset] < 255)
cpi->content_state_sb_fd[sb_offset]++;
} else {
cpi->content_state_sb_fd[sb_offset] = 0;
}
} if (tmp_sad == 0) x->zero_temp_sad_source = 1; return tmp_sad;
}
// This function chooses partitioning based on the variance between source and // reconstructed last, where variance is computed for down-sampled inputs. staticint choose_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
MACROBLOCK *x, int mi_row, int mi_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd; int i, j, k, m;
v64x64 vt;
v16x16 *vt2 = NULL; int force_split[21]; int avg_32x32; int max_var_32x32 = 0; int min_var_32x32 = INT_MAX; int var_32x32; int avg_16x16[4]; int maxvar_16x16[4]; int minvar_16x16[4];
int64_t threshold_4x4avg;
NOISE_LEVEL noise_level = kLow; int content_state = 0;
uint8_t *s; const uint8_t *d; int sp; int dp; int compute_minmax_variance = 1; unsignedint y_sad = UINT_MAX;
BLOCK_SIZE bsize = BLOCK_64X64; // Ref frame used in partitioning.
MV_REFERENCE_FRAME ref_frame_partition = LAST_FRAME; int pixels_wide = 64, pixels_high = 64;
int64_t thresholds[4] = { cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
cpi->vbp_thresholds[2], cpi->vbp_thresholds[3] }; int scene_change_detected =
cpi->rc.high_source_sad ||
(cpi->use_svc && cpi->svc.high_source_sad_superframe); int force_64_split = scene_change_detected ||
(cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
cpi->compute_source_sad_onepass &&
cpi->sf.use_source_sad && !x->zero_temp_sad_source);
// For the variance computation under SVC mode, we treat the frame as key if // the reference (base layer frame) is key frame (i.e., is_key_frame == 1). int is_key_frame =
(frame_is_intra_only(cm) ||
(is_one_pass_svc(cpi) &&
cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame));
if (!is_key_frame) { if (cm->frame_refs[LAST_FRAME - 1].sf.x_scale_fp == REF_INVALID_SCALE ||
cm->frame_refs[LAST_FRAME - 1].sf.y_scale_fp == REF_INVALID_SCALE)
is_key_frame = 1;
}
// Always use 4x4 partition for key frame. constint use_4x4_partition = frame_is_intra_only(cm); constint low_res = (cm->width <= 352 && cm->height <= 288); int variance4x4downsample[16]; int segment_id; int sb_offset = (cm->mi_stride >> 3) * (mi_row >> 3) + (mi_col >> 3);
// For SVC: check if LAST frame is NULL or if the resolution of LAST is // different than the current frame resolution, and if so, treat this frame // as a key frame, for the purpose of the superblock partitioning. // LAST == NULL can happen in some cases where enhancement spatial layers are // enabled dyanmically in the stream and the only reference is the spatial // reference (GOLDEN). if (cpi->use_svc) { const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, LAST_FRAME); if (ref == NULL || ref->y_crop_height != cm->height ||
ref->y_crop_width != cm->width)
is_key_frame = 1;
}
// For SVC on top spatial layer: use/scale the partition from // the lower spatial resolution if svc_use_lowres_part is enabled. if (cpi->sf.svc_use_lowres_part &&
cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 1 &&
cpi->svc.prev_partition_svc != NULL && content_state != kVeryHighSad) { if (!scale_partitioning_svc(cpi, x, xd, BLOCK_64X64, mi_row >> 1,
mi_col >> 1, mi_row, mi_col)) { if (cpi->sf.copy_partition_flag) {
update_prev_partition(cpi, x, segment_id, mi_row, mi_col, sb_offset);
} return 0;
}
} // If source_sad is low copy the partition without computing the y_sad. if (x->skip_low_source_sad && cpi->sf.copy_partition_flag &&
!force_64_split &&
copy_partitioning(cpi, x, xd, mi_row, mi_col, segment_id, sb_offset)) {
x->sb_use_mv_part = 1; if (cpi->sf.svc_use_lowres_part &&
cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col); return 0;
}
}
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
cyclic_refresh_segment_id_boosted(segment_id)) { int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
set_vbp_thresholds(cpi, thresholds, q, content_state);
} else {
set_vbp_thresholds(cpi, thresholds, cm->base_qindex, content_state);
} // Decrease 32x32 split threshold for screen on base layer, for scene // change/high motion frames. if (cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
cpi->svc.spatial_layer_id == 0 && force_64_split)
thresholds[1] = 3 * thresholds[1] >> 2;
// For non keyframes, disable 4x4 average for low resolution when speed = 8
threshold_4x4avg = (cpi->oxcf.speed < 8) ? thresholds[1] << 1 : INT64_MAX;
if (xd->mb_to_right_edge < 0) pixels_wide += (xd->mb_to_right_edge >> 3); if (xd->mb_to_bottom_edge < 0) pixels_high += (xd->mb_to_bottom_edge >> 3);
s = x->plane[0].src.buf;
sp = x->plane[0].src.stride;
// Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks, // 5-20 for the 16x16 blocks.
force_split[0] = force_64_split;
if (!is_key_frame) { // In the case of spatial/temporal scalable coding, the assumption here is // that the temporal reference frame will always be of type LAST_FRAME. // TODO(marpan): If that assumption is broken, we need to revisit this code.
MODE_INFO *mi = xd->mi[0];
YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
if (!(is_one_pass_svc(cpi) && cpi->svc.spatial_layer_id) ||
cpi->svc.use_gf_temporal_ref_current_layer) { // For now, GOLDEN will not be used for non-zero spatial layers, since // it may not be a temporal reference.
yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
}
// Only compute y_sad_g (sad for golden reference) for speed < 8. if (cpi->oxcf.speed < 8 && yv12_g && yv12_g != yv12 &&
(cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
&cm->frame_refs[GOLDEN_FRAME - 1].sf);
y_sad_g = cpi->fn_ptr[bsize].sdf(
x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
xd->plane[0].pre[0].stride);
} else {
y_sad_g = UINT_MAX;
}
if (cpi->use_skin_detection)
x->sb_is_skin = skin_sb_split(cpi, low_res, mi_row, mi_col, force_split);
d = xd->plane[0].dst.buf;
dp = xd->plane[0].dst.stride;
// If the y_sad is very small, take 64x64 as partition and exit. // Don't check on boosted segment for now, as 64x64 is suppressed there. if (segment_id == CR_SEGMENT_ID_BASE && y_sad < cpi->vbp_threshold_sad) { constint block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64]; constint block_height = num_8x8_blocks_high_lookup[BLOCK_64X64]; if (mi_col + block_width / 2 < cm->mi_cols &&
mi_row + block_height / 2 < cm->mi_rows) {
set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_64X64);
x->variance_low[0] = 1;
chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected); if (cpi->sf.svc_use_lowres_part &&
cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col); if (cpi->sf.copy_partition_flag) {
update_prev_partition(cpi, x, segment_id, mi_row, mi_col, sb_offset);
} return 0;
}
}
// If the y_sad is small enough, copy the partition of the superblock in the // last frame to current frame only if the last frame is not a keyframe. // Stop the copy every cpi->max_copied_frame to refresh the partition. // TODO(jianj) : tune the threshold. if (cpi->sf.copy_partition_flag && y_sad_last < cpi->vbp_threshold_copy &&
copy_partitioning(cpi, x, xd, mi_row, mi_col, segment_id, sb_offset)) {
chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected); if (cpi->sf.svc_use_lowres_part &&
cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col); return 0;
}
} else {
d = VP9_VAR_OFFS;
dp = 0; #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { switch (xd->bd) { case 10: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); break; case 12: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); break; case 8: default: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); break;
}
} #endif// CONFIG_VP9_HIGHBITDEPTH
}
if (low_res && threshold_4x4avg < INT64_MAX)
CHECK_MEM_ERROR(&cm->error, vt2, vpx_calloc(16, sizeof(*vt2))); // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances // for splits. for (i = 0; i < 4; i++) { constint x32_idx = ((i & 1) << 5); constint y32_idx = ((i >> 1) << 5); constint i2 = i << 2;
force_split[i + 1] = 0;
avg_16x16[i] = 0;
maxvar_16x16[i] = 0;
minvar_16x16[i] = INT_MAX; for (j = 0; j < 4; j++) { constint x16_idx = x32_idx + ((j & 1) << 4); constint y16_idx = y32_idx + ((j >> 1) << 4); constint split_index = 5 + i2 + j;
v16x16 *vst = &vt.split[i].split[j];
force_split[split_index] = 0;
variance4x4downsample[i2 + j] = 0; if (!is_key_frame) {
fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst, #if CONFIG_VP9_HIGHBITDEPTH
xd->cur_buf->flags, #endif
pixels_wide, pixels_high, is_key_frame);
fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
get_variance(&vt.split[i].split[j].part_variances.none);
avg_16x16[i] += vt.split[i].split[j].part_variances.none.variance; if (vt.split[i].split[j].part_variances.none.variance < minvar_16x16[i])
minvar_16x16[i] = vt.split[i].split[j].part_variances.none.variance; if (vt.split[i].split[j].part_variances.none.variance > maxvar_16x16[i])
maxvar_16x16[i] = vt.split[i].split[j].part_variances.none.variance; if (vt.split[i].split[j].part_variances.none.variance > thresholds[2]) { // 16X16 variance is above threshold for split, so force split to 8x8 // for this 16x16 block (this also forces splits for upper levels).
force_split[split_index] = 1;
force_split[i + 1] = 1;
force_split[0] = 1;
} elseif (compute_minmax_variance &&
vt.split[i].split[j].part_variances.none.variance >
thresholds[1] &&
!cyclic_refresh_segment_id_boosted(segment_id)) { // We have some nominal amount of 16x16 variance (based on average), // compute the minmax over the 8x8 sub-blocks, and if above threshold, // force split to 8x8 block for this 16x16 block. int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx, #if CONFIG_VP9_HIGHBITDEPTH
xd->cur_buf->flags, #endif
pixels_wide, pixels_high); int thresh_minmax = (int)cpi->vbp_threshold_minmax; if (x->content_state_sb == kVeryHighSad)
thresh_minmax = thresh_minmax << 1; if (minmax > thresh_minmax) {
force_split[split_index] = 1;
force_split[i + 1] = 1;
force_split[0] = 1;
}
}
} if (is_key_frame ||
(low_res && vt.split[i].split[j].part_variances.none.variance >
threshold_4x4avg)) {
force_split[split_index] = 0; // Go down to 4x4 down-sampling for variance.
variance4x4downsample[i2 + j] = 1; for (k = 0; k < 4; k++) { int x8_idx = x16_idx + ((k & 1) << 3); int y8_idx = y16_idx + ((k >> 1) << 3);
v8x8 *vst2 = is_key_frame ? &vst->split[k] : &vt2[i2 + j].split[k];
fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2, #if CONFIG_VP9_HIGHBITDEPTH
xd->cur_buf->flags, #endif
pixels_wide, pixels_high, is_key_frame);
}
}
}
} if (cpi->noise_estimate.enabled)
noise_level = vp9_noise_estimate_extract_level(&cpi->noise_estimate); // Fill the rest of the variance tree by summing split partition values.
avg_32x32 = 0; for (i = 0; i < 4; i++) { constint i2 = i << 2; for (j = 0; j < 4; j++) { if (variance4x4downsample[i2 + j] == 1) {
v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : &vt.split[i].split[j]; for (m = 0; m < 4; m++) fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
fill_variance_tree(vtemp, BLOCK_16X16); // If variance of this 16x16 block is above the threshold, force block // to split. This also forces a split on the upper levels.
get_variance(&vtemp->part_variances.none); if (vtemp->part_variances.none.variance > thresholds[2]) {
force_split[5 + i2 + j] = 1;
force_split[i + 1] = 1;
force_split[0] = 1;
}
}
}
fill_variance_tree(&vt.split[i], BLOCK_32X32); // If variance of this 32x32 block is above the threshold, or if its above // (some threshold of) the average variance over the sub-16x16 blocks, then // force this block to split. This also forces a split on the upper // (64x64) level. if (!force_split[i + 1]) {
get_variance(&vt.split[i].part_variances.none);
var_32x32 = vt.split[i].part_variances.none.variance;
max_var_32x32 = VPXMAX(var_32x32, max_var_32x32);
min_var_32x32 = VPXMIN(var_32x32, min_var_32x32); if (vt.split[i].part_variances.none.variance > thresholds[1] ||
(!is_key_frame &&
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.27 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.