/* * Copyright (c) 2010 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
*/
// Resets the first pass file to the given position using a relative seek from // the current position. staticvoid reset_fpf_position(TWO_PASS *p, const FIRSTPASS_STATS *position) {
p->stats_in = position;
}
// Read frame stats at an offset from the current position. staticconst FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p, int offset) { if ((offset >= 0 && p->stats_in + offset >= p->stats_in_end) ||
(offset < 0 && p->stats_in + offset < p->stats_in_start)) { return NULL;
}
// Calculate an active area of the image that discounts formatting // bars and partially discounts other 0 energy areas. #define MIN_ACTIVE_AREA 0.5 #define MAX_ACTIVE_AREA 1.0 staticdouble calculate_active_area(const FRAME_INFO *frame_info, const FIRSTPASS_STATS *this_frame) { double active_pct;
// Get the average weighted error for the clip (or corpus) staticdouble get_distribution_av_err(VP9_COMP *cpi, TWO_PASS *const twopass) { constdouble av_weight =
twopass->total_stats.weight / twopass->total_stats.count;
#define ACT_AREA_CORRECTION 0.5 // Calculate a modified Error used in distributing bits between easier and // harder frames. staticdouble calculate_mod_frame_score(const VP9_COMP *cpi, const VP9EncoderConfig *oxcf, const FIRSTPASS_STATS *this_frame, constdouble av_err) { double modified_score =
av_err * pow(this_frame->coded_error * this_frame->weight /
DOUBLE_DIVIDE_CHECK(av_err),
oxcf->two_pass_vbrbias / 100.0);
// Correction for active area. Frames with a reduced active area // (eg due to formatting bars) have a higher error per mb for the // remaining active MBs. The correction here assumes that coding // 0.5N blocks of complexity 2X is a little easier than coding N // blocks of complexity X.
modified_score *= pow(calculate_active_area(&cpi->frame_info, this_frame),
ACT_AREA_CORRECTION);
// Correction for active area. Frames with a reduced active area // (eg due to formatting bars) have a higher error per mb for the // remaining active MBs. The correction here assumes that coding // 0.5N blocks of complexity 2X is a little easier than coding N // blocks of complexity X.
modified_score *=
pow(calculate_active_area(frame_info, this_frame), ACT_AREA_CORRECTION);
// Normalize to a midpoint score.
modified_score /= DOUBLE_DIVIDE_CHECK(mean_mod_score); return fclamp(modified_score, min_score, max_score);
}
// Refine the motion search range according to the frame dimension // for first pass test. staticint get_search_range(const VP9_COMP *cpi) { int sr = 0; constint dim = VPXMIN(cpi->initial_width, cpi->initial_height);
while ((dim << sr) < MAX_FULL_PEL_VAL) ++sr; return sr;
}
// Reduce limits to keep the motion search within MV_MAX of ref_mv. Not doing // this can be problematic for big videos (8K) and may cause assert failure // (or memory violation) in mv_cost. Limits are only modified if they would // be non-empty. Returns 1 if limits are non-empty. staticint intersect_limits_with_mv_max(MvLimits *mv_limits, const MV *ref_mv) { constint row_min =
VPXMAX(mv_limits->row_min, (ref_mv->row + 7 - MV_MAX) >> 3); constint row_max =
VPXMIN(mv_limits->row_max, (ref_mv->row - 1 + MV_MAX) >> 3); constint col_min =
VPXMAX(mv_limits->col_min, (ref_mv->col + 7 - MV_MAX) >> 3); constint col_max =
VPXMIN(mv_limits->col_max, (ref_mv->col - 1 + MV_MAX) >> 3); if (row_min > row_max || col_min > col_max) { return 0;
}
mv_limits->row_min = row_min;
mv_limits->row_max = row_max;
mv_limits->col_min = col_min;
mv_limits->col_max = col_max; return 1;
}
// Find the lowest qindex whose quantizer reaches FIRST_PASS_Q at the given
// bit depth; fall back to the top of the range if none does.
static int find_fp_qindex(vpx_bit_depth_t bit_depth) {
  int qindex;
  for (qindex = 0; qindex < QINDEX_RANGE; ++qindex) {
    if (vp9_convert_qindex_to_q(qindex, bit_depth) >= FIRST_PASS_Q) {
      return qindex;
    }
  }
  return QINDEX_RANGE - 1;
}
// Set the frame type for a first pass frame: the first frame (or a frame
// flagged as a key frame) is coded as KEY_FRAME unless it is an alt-ref
// refresh; everything else is an INTER_FRAME.
static void set_first_pass_params(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int key_candidate =
      cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY);
  cm->frame_type =
      (!cpi->refresh_alt_ref_frame && key_candidate) ? KEY_FRAME : INTER_FRAME;
  // Do not use periodic key frames.
  cpi->rc.frames_to_key = INT_MAX;
}
// Scale an sse threshold to account for 8/10/12 bit. staticint scale_sse_threshold(VP9_COMMON *cm, int thresh) { int ret_val = thresh; #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { switch (cm->bit_depth) { case VPX_BITS_8: ret_val = thresh; break; case VPX_BITS_10: ret_val = thresh << 4; break; default:
assert(cm->bit_depth == VPX_BITS_12);
ret_val = thresh << 8; break;
}
} #else
(void)cm; #endif// CONFIG_VP9_HIGHBITDEPTH return ret_val;
}
// This threshold is used to track blocks where to all intents and purposes // the intra prediction error 0. Though the metric we test against // is technically a sse we are mainly interested in blocks where all the pixels // in the 8 bit domain have an error of <= 1 (where error = sse) so a // linear scaling for 10 and 12 bit gives similar results. #define UL_INTRA_THRESH 50 staticint get_ul_intra_threshold(VP9_COMMON *cm) { int ret_val = UL_INTRA_THRESH; #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { switch (cm->bit_depth) { case VPX_BITS_8: ret_val = UL_INTRA_THRESH; break; case VPX_BITS_10: ret_val = UL_INTRA_THRESH << 2; break; default:
assert(cm->bit_depth == VPX_BITS_12);
ret_val = UL_INTRA_THRESH << 4; break;
}
} #else
(void)cm; #endif// CONFIG_VP9_HIGHBITDEPTH return ret_val;
}
// Estimate noise at a single point based on the impact of a spatial kernel // on the point value staticint fp_estimate_point_noise(uint8_t *src_ptr, constint stride) { int sum_weight = 0; int sum_val = 0; int i, j; int max_diff = 0; int diff; int dn_diff;
uint8_t *tmp_ptr;
uint8_t *kernel_ptr;
uint8_t dn_val;
uint8_t centre_val = *src_ptr;
// NOTE(review): assumes fp_dn_kernel_3 is a KERNEL_SIZE * KERNEL_SIZE table
// of small integer weights - confirm against its definition.
kernel_ptr = fp_dn_kernel_3;
// Apply the kernel
// Walk the KERNEL_SIZE x KERNEL_SIZE window centred on src_ptr; the window
// origin is one row up and one column left of the centre pixel.
tmp_ptr = src_ptr - stride - 1; for (i = 0; i < KERNEL_SIZE; ++i) { for (j = 0; j < KERNEL_SIZE; ++j) {
diff = abs((int)centre_val - (int)tmp_ptr[j]);
// Only pixels close in value to the centre (within FP_DN_THRESH) contribute
// to the denoised estimate; max_diff tracks the worst neighbour difference.
max_diff = VPXMAX(max_diff, diff); if (diff <= FP_DN_THRESH) {
sum_weight += *kernel_ptr;
sum_val += (int)tmp_ptr[j] * (int)*kernel_ptr;
}
++kernel_ptr;
}
tmp_ptr += stride;
}
// Points next to a strong edge (any neighbour differing by
// FP_MAX_DN_THRESH or more) are left unfiltered so edges do not register
// as noise.
if (max_diff < FP_MAX_DN_THRESH) // Update the source value with the new filtered value
// Weighted average with rounding. NOTE(review): the division assumes
// sum_weight > 0; the centre tap (diff == 0) should guarantee this provided
// its kernel weight is nonzero - confirm against fp_dn_kernel_3.
dn_val = (sum_val + (sum_weight >> 1)) / sum_weight; else
dn_val = *src_ptr;
// return the noise energy as the square of the difference between the // denoised and raw value.
dn_diff = (int)*src_ptr - (int)dn_val; return dn_diff * dn_diff;
} #if CONFIG_VP9_HIGHBITDEPTH staticint fp_highbd_estimate_point_noise(uint8_t *src_ptr, constint stride) { int sum_weight = 0; int sum_val = 0; int i, j; int max_diff = 0; int diff; int dn_diff;
uint8_t *tmp_ptr;
uint16_t *tmp_ptr16;
uint8_t *kernel_ptr;
uint16_t dn_val;
uint16_t centre_val = *CONVERT_TO_SHORTPTR(src_ptr);
if (max_diff < FP_MAX_DN_THRESH) // Update the source value with the new filtered value
dn_val = (sum_val + (sum_weight >> 1)) / sum_weight; else
dn_val = *CONVERT_TO_SHORTPTR(src_ptr);
// return the noise energy as the square of the difference between the // denoised and raw value.
dn_diff = (int)(*CONVERT_TO_SHORTPTR(src_ptr)) - (int)dn_val; return dn_diff * dn_diff;
} #endif
// Estimate noise for a block. staticint fp_estimate_block_noise(MACROBLOCK *x, BLOCK_SIZE bsize) { #if CONFIG_VP9_HIGHBITDEPTH
MACROBLOCKD *xd = &x->e_mbd; #endif
uint8_t *src_ptr = &x->plane[0].src.buf[0]; constint width = num_4x4_blocks_wide_lookup[bsize] * 4; constint height = num_4x4_blocks_high_lookup[bsize] * 4; int w, h; int stride = x->plane[0].src.stride; int block_noise = 0;
// Sampled points to reduce cost overhead. for (h = 0; h < height; h += 2) { for (w = 0; w < width; w += 2) { #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
block_noise += fp_highbd_estimate_point_noise(src_ptr, stride); else
block_noise += fp_estimate_point_noise(src_ptr, stride); #else
block_noise += fp_estimate_point_noise(src_ptr, stride); #endif
++src_ptr;
}
src_ptr += (stride - width);
} return block_noise << 2; // Scale << 2 to account for sampling.
}
// This function is called to test the functionality of row based // multi-threading in unit tests for bit-exactness staticvoid accumulate_floating_point_stats(VP9_COMP *cpi,
TileDataEnc *first_tile_col) {
VP9_COMMON *const cm = &cpi->common; int mb_row, mb_col;
first_tile_col->fp_data.intra_factor = 0;
first_tile_col->fp_data.brightness_factor = 0;
first_tile_col->fp_data.neutral_count = 0; for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { constint mb_index = mb_row * cm->mb_cols + mb_col;
first_tile_col->fp_data.intra_factor +=
cpi->twopass.fp_mb_float_stats[mb_index].frame_mb_intra_factor;
first_tile_col->fp_data.brightness_factor +=
cpi->twopass.fp_mb_float_stats[mb_index].frame_mb_brightness_factor;
first_tile_col->fp_data.neutral_count +=
cpi->twopass.fp_mb_float_stats[mb_index].frame_mb_neutral_count;
}
}
}
staticvoid first_pass_stat_calc(VP9_COMP *cpi, FIRSTPASS_STATS *fps,
FIRSTPASS_DATA *fp_acc_data) {
VP9_COMMON *const cm = &cpi->common; // The minimum error here insures some bit allocation to frames even // in static regions. The allocation per MB declines for larger formats // where the typical "real" energy per MB also falls. // Initial estimate here uses sqrt(mbs) to define the min_err, where the // number of mbs is proportional to the image area. constint num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
: cpi->common.MBs; constdouble min_err = 200 * sqrt(num_mbs);
// Clamp the image start to rows/2. This number of rows is discarded top // and bottom as dead data so rows / 2 means the frame is blank. if ((fp_acc_data->image_data_start_row > cm->mb_rows / 2) ||
(fp_acc_data->image_data_start_row == INVALID_ROW)) {
fp_acc_data->image_data_start_row = cm->mb_rows / 2;
} // Exclude any image dead zone if (fp_acc_data->image_data_start_row > 0) {
fp_acc_data->intra_skip_count =
VPXMAX(0, fp_acc_data->intra_skip_count -
(fp_acc_data->image_data_start_row * cm->mb_cols * 2));
}
xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset;
xd->plane[2].dst.buf = new_yv12->v_buffer + recon_uvoffset;
xd->mi[0]->sb_type = bsize;
xd->mi[0]->ref_frame[0] = INTRA_FRAME;
set_mi_row_col(xd, &tile, mb_row << 1, num_8x8_blocks_high_lookup[bsize],
mb_col << 1, num_8x8_blocks_wide_lookup[bsize], cm->mi_rows,
cm->mi_cols); // Are edges available for intra prediction? // Since the firstpass does not populate the mi_grid_visible, // above_mi/left_mi must be overwritten with a nonzero value when edges // are available. Required by vp9_predict_intra_block().
xd->above_mi = (mb_row != 0) ? &mi_above : NULL;
xd->left_mi = ((mb_col << 1) > tile.mi_col_start) ? &mi_left : NULL;
// Do intra 16x16 prediction.
x->skip_encode = 0;
x->fp_src_pred = 0; // Do intra prediction based on source pixels for tile boundaries if (mb_col == mb_col_start && mb_col != 0) {
xd->left_mi = &mi_left;
x->fp_src_pred = 1;
}
xd->mi[0]->mode = DC_PRED;
xd->mi[0]->tx_size =
use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4; // Fix - zero the 16x16 block first. This ensures correct this_error for // block sizes smaller than 16x16.
vp9_zero_array(x->plane[0].src_diff, 256);
vp9_encode_intra_block_plane(x, bsize, 0, 0);
this_error = vpx_get_mb_ss(x->plane[0].src_diff);
this_intra_error = this_error;
// Keep a record of blocks that have very low intra error residual // (i.e. are in effect completely flat and untextured in the intra // domain). In natural videos this is uncommon, but it is much more // common in animations, graphics and screen content, so may be used // as a signal to detect these types of content. if (this_error < get_ul_intra_threshold(cm)) {
++(fp_acc_data->intra_skip_count);
} elseif ((mb_col > 0) &&
(fp_acc_data->image_data_start_row == INVALID_ROW)) {
fp_acc_data->image_data_start_row = mb_row;
}
// Blocks that are mainly smooth in the intra domain. // Some special accounting for CQ but also these are better for testing // noise levels. if (this_error < get_smooth_intra_threshold(cm)) {
++(fp_acc_data->intra_smooth_count);
}
// Special case noise measurement for first frame. if (cm->current_video_frame == 0) { if (this_intra_error < scale_sse_threshold(cm, LOW_I_THRESH)) {
fp_acc_data->frame_noise_energy += fp_estimate_block_noise(x, bsize);
} else {
fp_acc_data->frame_noise_energy += (int64_t)SECTION_NOISE_DEF;
}
}
#if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { switch (cm->bit_depth) { case VPX_BITS_8: break; case VPX_BITS_10: this_error >>= 4; break; default:
assert(cm->bit_depth == VPX_BITS_12);
this_error >>= 8; break;
}
} #endif// CONFIG_VP9_HIGHBITDEPTH
// Intrapenalty below deals with situations where the intra and inter // error scores are very low (e.g. a plain black frame). // We do not have special cases in first pass for 0,0 and nearest etc so // all inter modes carry an overhead cost estimate for the mv. // When the error score is very low this causes us to pick all or lots of // INTRA modes and throw lots of key frames. // This penalty adds a cost matching that of a 0,0 mv to the intra case.
this_error += intrapenalty;
// Accumulate the intra error.
fp_acc_data->intra_error += (int64_t)this_error;
// Set up limit values for motion vectors to prevent them extending // outside the UMV borders.
x->mv_limits.col_min = -((mb_col * 16) + BORDER_MV_PIXELS_B16);
x->mv_limits.col_max =
((cm->mb_cols - 1 - mb_col) * 16) + BORDER_MV_PIXELS_B16;
// Other than for intra-only frame do a motion search. if (!frame_is_intra_only(cm)) { int tmp_err, motion_error, this_motion_error, raw_motion_error; // Assume 0,0 motion with no mv overhead.
MV mv = { 0, 0 }, tmp_mv = { 0, 0 }; struct buf_2d unscaled_last_source_buf_2d;
vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize];
#if CONFIG_RATE_CTRL if (cpi->oxcf.use_simple_encode_api) { // Store zero mv as default
store_fp_motion_vector(cpi, &mv, mb_row, mb_col, LAST_FRAME, 0);
} #endif// CONFIG_RAGE_CTRL
// Compute the motion error of the 0,0 motion using the last source // frame as the reference. Skip the further motion search on // reconstructed frame if this error is very small.
unscaled_last_source_buf_2d.buf =
cpi->unscaled_last_source->y_buffer + recon_yoffset;
unscaled_last_source_buf_2d.stride = cpi->unscaled_last_source->y_stride; #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
raw_motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd);
} else {
raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
&unscaled_last_source_buf_2d);
} #else
raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
&unscaled_last_source_buf_2d); #endif// CONFIG_VP9_HIGHBITDEPTH
if (raw_motion_error > NZ_MOTION_PENALTY) { // Test last reference frame using the previous best mv as the // starting point (best reference) for the search.
first_pass_motion_search(cpi, x, best_ref_mv, &mv, &motion_error);
// If the current best reference mv is not centered on 0,0 then do a // 0,0 based search as well. if (!is_zero_mv(best_ref_mv)) {
tmp_err = INT_MAX;
first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv, &tmp_err);
// Search in an older reference frame. if ((cm->current_video_frame > 1) && gld_yv12 != NULL) { // Assume 0,0 motion with no mv overhead. int gf_motion_error;
if (gf_motion_error < motion_error && gf_motion_error < this_error)
++(fp_acc_data->second_ref_count);
// Reset to last frame as reference buffer.
xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
xd->plane[1].pre[0].buf = first_ref_buf->u_buffer + recon_uvoffset;
xd->plane[2].pre[0].buf = first_ref_buf->v_buffer + recon_uvoffset;
// In accumulating a score for the older reference frame take the // best of the motion predicted score and the intra coded error // (just as will be done for) accumulation of "coded_error" for // the last frame. if (gf_motion_error < this_error)
fp_acc_data->sr_coded_error += gf_motion_error; else
fp_acc_data->sr_coded_error += this_error;
} else {
fp_acc_data->sr_coded_error += motion_error;
}
} else {
fp_acc_data->sr_coded_error += motion_error;
}
// Start by assuming that intra mode is best.
best_ref_mv->row = 0;
best_ref_mv->col = 0;
if (motion_error <= this_error) {
vpx_clear_system_state();
// Keep a count of cases where the inter and intra were very close // and very low. This helps with scene cut detection for example in // cropped clips with black bars at the sides or top and bottom. if (((this_error - intrapenalty) * 9 <= motion_error * 10) &&
(this_error < (2 * intrapenalty))) {
fp_acc_data->neutral_count += 1.0; if (cpi->row_mt_bit_exact)
cpi->twopass.fp_mb_float_stats[mb_index].frame_mb_neutral_count =
1.0; // Also track cases where the intra is not much worse than the inter // and use this in limiting the GF/arf group length.
} elseif ((this_error > NCOUNT_INTRA_THRESH) &&
(this_error < (NCOUNT_INTRA_FACTOR * motion_error))) {
mb_neutral_count =
(double)motion_error / DOUBLE_DIVIDE_CHECK((double)this_error);
fp_acc_data->neutral_count += mb_neutral_count; if (cpi->row_mt_bit_exact)
cpi->twopass.fp_mb_float_stats[mb_index].frame_mb_neutral_count =
mb_neutral_count;
}
// Don't allow a value of 0 for duration. // (Section duration is also defaulted to minimum of 1.0).
fps.duration = VPXMAX(1.0, (double)(source->ts_end - source->ts_start));
// Don't want to do output stats with a stack variable!
twopass->this_frame_stats = fps;
output_stats(&twopass->this_frame_stats);
accumulate_stats(&twopass->total_stats, &fps);
}
// Copy the previous Last Frame back into gf and arf buffers if // the prediction is good enough... but also don't allow it to lag too far. if ((twopass->sr_update_lag > 3) ||
((cm->current_video_frame > 0) &&
(twopass->this_frame_stats.pcnt_inter > 0.20) &&
((twopass->this_frame_stats.intra_error /
DOUBLE_DIVIDE_CHECK(twopass->this_frame_stats.coded_error)) > 2.0))) { if (gld_yv12 != NULL) {
ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
cm->ref_frame_map[cpi->lst_fb_idx]);
}
twopass->sr_update_lag = 1;
} else {
++twopass->sr_update_lag;
}
vpx_extend_frame_borders(new_yv12);
// The frame we just compressed now becomes the last frame.
ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx],
cm->new_fb_idx);
// Special case for the first frame. Copy into the GF buffer as a second // reference. if (cm->current_video_frame == 0 && cpi->gld_fb_idx != INVALID_IDX) {
ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
cm->ref_frame_map[cpi->lst_fb_idx]);
}
// In the first pass, every frame is considered as a show frame.
update_frame_indexes(cm, /*show_frame=*/1); if (cpi->use_svc) vp9_inc_frame_in_layer(cpi);
}
// Adjustment based on quantizer to the power term.
power_term =
q_pow_term[index] +
(((q_pow_term[index + 1] - q_pow_term[index]) * (q % 32)) / 32.0);
// Clamp the target rate to VBR min / max limts. constint target_rate =
vp9_rc_clamp_pframe_target_size(cpi, section_target_bandwidth); double noise_factor = pow((section_noise / SECTION_NOISE_DEF), 0.5);
noise_factor = fclamp(noise_factor, NOISE_FACTOR_MIN, NOISE_FACTOR_MAX);
inactive_zone = fclamp(inactive_zone, 0.0, 1.0);
// TODO(jimbankoski): remove #if here or below when this has been // well tested. #if CONFIG_ALWAYS_ADJUST_BPM // based on recent history adjust expectations of bits per macroblock.
last_group_rate_err =
(double)twopass->rolling_arf_group_actual_bits /
DOUBLE_DIVIDE_CHECK((double)twopass->rolling_arf_group_target_bits);
last_group_rate_err = VPXMAX(0.25, VPXMIN(4.0, last_group_rate_err));
twopass->bpm_factor *= (3.0 + last_group_rate_err) / 4.0;
twopass->bpm_factor = VPXMAX(0.25, VPXMIN(4.0, twopass->bpm_factor)); #endif
// TODO(jimbankoski): remove #if here or above when this has been // well tested. #if !CONFIG_ALWAYS_ADJUST_BPM // based on recent history adjust expectations of bits per macroblock.
last_group_rate_err =
(double)twopass->rolling_arf_group_actual_bits /
DOUBLE_DIVIDE_CHECK((double)twopass->rolling_arf_group_target_bits);
last_group_rate_err = VPXMAX(0.25, VPXMIN(4.0, last_group_rate_err));
twopass->bpm_factor *= (3.0 + last_group_rate_err) / 4.0;
twopass->bpm_factor = VPXMAX(0.25, VPXMIN(4.0, twopass->bpm_factor)); #endif
// Try and pick a max Q that will be high enough to encode the // content at the given rate. for (q = rc->best_quality; q < rc->worst_quality; ++q) { constdouble factor =
calc_correction_factor(av_err_per_mb, wq_err_divisor(cpi), q); constint bits_per_mb = vp9_rc_bits_per_mb(
INTER_FRAME, q,
factor * speed_term * cpi->twopass.bpm_factor * noise_factor,
cpi->common.bit_depth); if ((uint64_t)bits_per_mb <= target_norm_bits_per_mb) break;
}
// Restriction on active max q for constrained quality mode. if (cpi->oxcf.rc_mode == VPX_CQ) q = VPXMAX(q, oxcf->cq_level); return q;
}
}
// Derive a per rate-factor-level maximum Q from the frame type qdelta,
// clamped so it never falls below the best allowed quality.
static void setup_rf_level_maxq(VP9_COMP *cpi) {
  RATE_CONTROL *const rc = &cpi->rc;
  int level;
  for (level = INTER_NORMAL; level < RATE_FACTOR_LEVELS; ++level) {
    const int qdelta = vp9_frame_type_qdelta(cpi, level, rc->worst_quality);
    rc->rf_level_maxq[level] =
        VPXMAX(rc->worst_quality + qdelta, rc->best_quality);
  }
}
// Precompute the scaled frame dimensions for each frame scale step and set
// up the per rate-factor-level max Q values.
static void init_subsampling(VP9_COMP *cpi) {
  const VP9_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  const int width = cm->width;
  const int height = cm->height;
  int step;

  // Note: Frames with odd-sized dimensions may result from this scaling.
  for (step = 0; step < FRAME_SCALE_STEPS; ++step) {
    rc->frame_width[step] = (width * 16) / frame_scale_factor[step];
    rc->frame_height[step] = (height * 16) / frame_scale_factor[step];
  }

  setup_rf_level_maxq(cpi);
}
// Report the coded (possibly down-scaled) frame dimensions chosen by the
// rate control frame size selector.
void calculate_coded_size(VP9_COMP *cpi, int *scaled_frame_width,
                          int *scaled_frame_height) {
  const RATE_CONTROL *const rc = &cpi->rc;
  const int sel = rc->frame_size_selector;
  *scaled_frame_width = rc->frame_width[sel];
  *scaled_frame_height = rc->frame_height[sel];
}
// Scan the first pass file and calculate a modified score for each // frame that is used to distribute bits. The modified score is assumed // to provide a linear basis for bit allocation. I.e., a frame A with a score // that is double that of frame B will be allocated 2x as many bits.
{ double modified_score_total = 0.0; const FIRSTPASS_STATS *s = twopass->stats_in; double av_err;
if (oxcf->vbr_corpus_complexity) {
twopass->mean_mod_score = (double)oxcf->vbr_corpus_complexity / 10.0;
av_err = get_distribution_av_err(cpi, twopass);
} else {
av_err = get_distribution_av_err(cpi, twopass); // The first scan is unclamped and gives a raw average. while (s < twopass->stats_in_end) {
modified_score_total += calculate_mod_frame_score(cpi, oxcf, s, av_err);
++s;
}
// The average error from this first scan is used to define the midpoint // error for the rate distribution function.
twopass->mean_mod_score =
modified_score_total / DOUBLE_DIVIDE_CHECK(stats->count);
}
// Second scan using clamps based on the previous cycle average. // This may modify the total and average somewhat but we don't bother with // further iterations.
modified_score_total = 0.0;
s = twopass->stats_in; while (s < twopass->stats_in_end) {
modified_score_total +=
calculate_norm_frame_score(cpi, twopass, oxcf, s, av_err);
++s;
}
twopass->normalized_score_left = modified_score_total;
// If using Corpus wide VBR mode then update the clip target bandwidth to // reflect how the clip compares to the rest of the corpus. if (oxcf->vbr_corpus_complexity) {
oxcf->target_bandwidth =
(int64_t)((double)oxcf->target_bandwidth *
(twopass->normalized_score_left / stats->count));
}
frame_rate = 10000000.0 * stats->count / stats->duration; // Each frame can have a different duration, as the frame rate in the source // isn't guaranteed to be constant. The frame rate prior to the first frame // encoded in the second pass is a guess. However, the sum duration is not. // It is calculated based on the actual durations of all frames from the // first pass.
vp9_new_framerate(cpi, frame_rate);
twopass->bits_left =
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.