/* * Copyright (c) 2019, Alliance for Open Media. All rights reserved. * * This source code is subject to the terms of the BSD 2 Clause License and * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License * was not distributed with this source code in the LICENSE file, you can * obtain it at www.aomedia.org/license/software. If the Alliance for Open * Media Patent License 1.0 was not distributed with this source code in the * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
/*!\defgroup gf_group_algo Golden Frame Group * \ingroup high_level_algo * Algorithms regarding determining the length of GF groups and defining GF * group structures. * @{
*/ /*! @} - end defgroup gf_group_algo */
// Calculate an active area of the image that discounts formatting // bars and partially discounts other 0 energy areas. #define MIN_ACTIVE_AREA 0.5 #define MAX_ACTIVE_AREA 1.0 staticdouble calculate_active_area(const FRAME_INFO *frame_info, const FIRSTPASS_STATS *this_frame) { constdouble active_pct =
1.0 -
((this_frame->intra_skip_pct / 2) +
((this_frame->inactive_zone_rows * 2) / (double)frame_info->mb_rows)); return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA);
}
// Calculate a modified Error used in distributing bits between easier and // harder frames. #define ACT_AREA_CORRECTION 0.5 staticdouble calculate_modified_err_new(const FRAME_INFO *frame_info, const FIRSTPASS_STATS *total_stats, const FIRSTPASS_STATS *this_stats, int vbrbias, double modified_error_min, double modified_error_max) { if (total_stats == NULL) { return 0;
} constdouble av_weight = total_stats->weight / total_stats->count; constdouble av_err =
(total_stats->coded_error * av_weight) / total_stats->count; double modified_error =
av_err * pow(this_stats->coded_error * this_stats->weight /
DOUBLE_DIVIDE_CHECK(av_err),
vbrbias / 100.0);
// Correction for active area. Frames with a reduced active area // (eg due to formatting bars) have a higher error per mb for the // remaining active MBs. The correction here assumes that coding // 0.5N blocks of complexity 2X is a little easier than coding N // blocks of complexity X.
modified_error *=
pow(calculate_active_area(frame_info, this_stats), ACT_AREA_CORRECTION);
// Resets the first pass file to the given position using a relative seek from // the current position. staticvoid reset_fpf_position(TWO_PASS_FRAME *p_frame, const FIRSTPASS_STATS *position) {
p_frame->stats_in = position;
}
*fps = *p_frame->stats_in; /* Move old stats[0] out to accommodate for next frame stats */
memmove(p->frame_stats_arr[0], p->frame_stats_arr[1],
(p->stats_buf_ctx->stats_in_end - p_frame->stats_in - 1) * sizeof(FIRSTPASS_STATS));
p->stats_buf_ctx->stats_in_end--; return 1;
}
// Read frame stats at an offset from the current position. staticconst FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p, const TWO_PASS_FRAME *p_frame, int offset) { if ((offset >= 0 &&
p_frame->stats_in + offset >= p->stats_buf_ctx->stats_in_end) ||
(offset < 0 &&
p_frame->stats_in + offset < p->stats_buf_ctx->stats_in_start)) { return NULL;
}
return &p_frame->stats_in[offset];
}
// This function returns the maximum target rate per frame. staticint frame_max_bits(const RATE_CONTROL *rc, const AV1EncoderConfig *oxcf) {
int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
(int64_t)oxcf->rc_cfg.vbrmax_section) /
100; if (max_bits < 0)
max_bits = 0; elseif (max_bits > rc->max_frame_bandwidth)
max_bits = rc->max_frame_bandwidth;
return (int)max_bits;
}
// Based on history adjust expectations of bits per macroblock. staticvoid twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
TWO_PASS *const twopass = &cpi->ppi->twopass; const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
// Based on recent history adjust expectations of bits per macroblock. double rate_err_factor = 1.0; constdouble adj_limit = AOMMAX(0.2, (double)(100 - rate_err_tol) / 200.0); constdouble min_fac = 1.0 - adj_limit; constdouble max_fac = 1.0 + adj_limit;
#if CONFIG_THREE_PASS if (cpi->third_pass_ctx && cpi->third_pass_ctx->frame_info_count > 0) {
int64_t actual_bits = 0;
int64_t target_bits = 0; double factor = 0.0; int count = 0; for (int i = 0; i < cpi->third_pass_ctx->frame_info_count; i++) {
actual_bits += cpi->third_pass_ctx->frame_info[i].actual_bits;
target_bits += cpi->third_pass_ctx->frame_info[i].bits_allocated;
factor += cpi->third_pass_ctx->frame_info[i].bpm_factor;
count++;
}
// Is the rate control trending in the right direction. Only make // an adjustment if things are getting worse. if ((rate_err_factor < 1.0 && err_estimate >= 0) ||
(rate_err_factor > 1.0 && err_estimate <= 0)) {
twopass->bpm_factor *= rate_err_factor;
twopass->bpm_factor = AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
}
}
// Similar to find_qindex_by_rate() function in ratectrl.c, but includes // calculation of a correction_factor. staticint find_qindex_by_rate_with_correction(uint64_t desired_bits_per_mb,
aom_bit_depth_t bit_depth, double error_per_mb, double group_weight_factor, int best_qindex, int worst_qindex) {
assert(best_qindex <= worst_qindex); int low = best_qindex; int high = worst_qindex;
/*!\brief Choose a target maximum Q for a group of frames * * \ingroup rate_control * * This function is used to estimate a suitable maximum Q for a * group of frames. Inititally it is called to get a crude estimate * for the whole clip. It is then called for each ARF/GF group to get * a revised estimate for that group. * * \param[in] cpi Top-level encoder structure * \param[in] av_frame_err The average per frame coded error score * for frames making up this section/group. * \param[in] inactive_zone Used to mask off /ignore part of the * frame. The most common use case is where * a wide format video (e.g. 16:9) is * letter-boxed into a more square format. * Here we want to ignore the bands at the * top and bottom. * \param[in] av_target_bandwidth The target bits per frame * * \return The maximum Q for frames in the group.
*/ staticint get_twopass_worst_quality(AV1_COMP *cpi, constdouble av_frame_err, double inactive_zone, int av_target_bandwidth) { const RATE_CONTROL *const rc = &cpi->rc; const AV1EncoderConfig *const oxcf = &cpi->oxcf; const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
inactive_zone = fclamp(inactive_zone, 0.0, 0.9999);
// Update bpm correction factor based on previous GOP rate error.
twopass_update_bpm_factor(cpi, rate_err_tol);
// Try and pick a max Q that will be high enough to encode the // content at the given rate. int q = find_qindex_by_rate_with_correction(
target_norm_bits_per_mb, cpi->common.seq_params->bit_depth,
av_err_per_mb,
cpi->ppi->twopass.bpm_factor * speed_factor * size_factor,
rc->best_quality, rc->worst_quality);
// Restriction on active max q for constrained quality mode. if (rc_cfg->mode == AOM_CQ) q = AOMMAX(q, rc_cfg->cq_level); return q;
}
}
/* This function considers how the quality of prediction may be deteriorating * with distance. It compares the coded error for the last frame and the * second reference frame (usually two frames old) and also applies a factor * based on the extent of INTRA coding. * * The decay factor is then used to reduce the contribution of frames further * from the alt-ref or golden frame, to the bitrate boost calculation for that * alt-ref or golden frame.
*/ staticdouble get_sr_decay_rate(const FIRSTPASS_STATS *frame) { double sr_diff = (frame->sr_coded_error - frame->coded_error); double sr_decay = 1.0; double modified_pct_inter; double modified_pcnt_intra;
// This function gives an estimate of how badly we believe the prediction // quality is decaying from frame to frame. staticdouble get_zero_motion_factor(const FIRSTPASS_STATS *frame) { constdouble zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion; double sr_decay = get_sr_decay_rate(frame); return AOMMIN(sr_decay, zero_motion_pct);
}
// Clamp value to range 0.0 to 1.0 // This should happen anyway if input values are sensibly clamped but checked // here just in case. if (zero_motion_factor > 1.0)
zero_motion_factor = 1.0; elseif (zero_motion_factor < 0.0)
zero_motion_factor = 0.0;
// Function to test for a condition where a complex transition is followed // by a static section. For example in slide shows where there is a fade // between slides. This is to help with more optimal kf and gf positioning. staticint detect_transition_to_still(const FIRSTPASS_INFO *firstpass_info, int next_stats_index, constint min_gf_interval, constint frame_interval, constint still_interval, constdouble loop_decay_rate, constdouble last_decay_rate) { // Break clause to detect very still sections after motion // For example a static image after a fade or other transition // instead of a clean scene cut. if (frame_interval > min_gf_interval && loop_decay_rate >= 0.999 &&
last_decay_rate < 0.9) { int stats_left =
av1_firstpass_info_future_count(firstpass_info, next_stats_index); if (stats_left >= still_interval) { int j; // Look ahead a few frames to see if static condition persists... for (j = 0; j < still_interval; ++j) { const FIRSTPASS_STATS *stats =
av1_firstpass_info_peek(firstpass_info, next_stats_index + j); if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break;
} // Only if it does do we signal a transition to still. return j == still_interval;
}
} return 0;
}
// This function detects a flash through the high relative pcnt_second_ref // score in the frame following a flash frame. The offset passed in should // reflect this. staticint detect_flash(const TWO_PASS *twopass, const TWO_PASS_FRAME *twopass_frame, constint offset) { const FIRSTPASS_STATS *const next_frame =
read_frame_stats(twopass, twopass_frame, offset);
// What we are looking for here is a situation where there is a // brief break in prediction (such as a flash) but subsequent frames // are reasonably well predicted by an earlier (pre flash) frame. // The recovery after a flash is indicated by a high pcnt_second_ref // compared to pcnt_inter. return next_frame != NULL &&
next_frame->pcnt_second_ref > next_frame->pcnt_inter &&
next_frame->pcnt_second_ref >= 0.5;
}
// Update the motion related elements to the GF arf boost calculation. staticvoid accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats,
GF_GROUP_STATS *gf_stats, double f_w, double f_h) { constdouble pct = stats->pcnt_motion;
// Accumulate a measure of how uniform (or conversely how random) the motion // field is (a ratio of abs(mv) / mv). if (pct > 0.05) { constdouble mvr_ratio =
fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr)); constdouble mvc_ratio =
fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
// Use a different error per mb factor for calculating boost for // different formats. if (screen_area <= 640 * 360) { return 500.0;
} else { return 1000.0;
}
}
// Underlying boost factor is based on inter error ratio.
frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
this_frame->intra_error * active_area) /
DOUBLE_DIVIDE_CHECK(this_frame->coded_error);
frame_boost = frame_boost * BOOST_FACTOR * boost_q_correction;
// Increase boost for frames where new data coming into frame (e.g. zoom out). // Slightly reduce boost if there is a net balance of motion out of the frame // (zoom in). The range for this_frame_mv_in_out is -1.0 to +1.0. if (this_frame_mv_in_out > 0.0)
frame_boost += frame_boost * (this_frame_mv_in_out * 2.0); // In the extreme case the boost is halved. else
frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
// Underlying boost factor is based on inter error ratio.
frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
this_frame->intra_error * active_area) /
DOUBLE_DIVIDE_CHECK(
(this_frame->coded_error + *sr_accumulator) * active_area);
// Update the accumulator for second ref error difference. // This is intended to give an indication of how much the coded error is // increasing over time.
*sr_accumulator += (this_frame->sr_coded_error - this_frame->coded_error);
*sr_accumulator = AOMMAX(0.0, *sr_accumulator);
// Q correction and scaling // The 40.0 value here is an experimentally derived baseline minimum. // This value is in line with the minimum per frame boost in the alt_ref // boost calculation.
frame_boost = ((frame_boost + 40.0) * boost_q_correction);
// Project a GF/ARF boost measured over a partial stats window up to the full
// frames_to_project window.
static int get_projected_gfu_boost(const PRIMARY_RATE_CONTROL *p_rc,
                                   int gfu_boost, int frames_to_project,
                                   int num_stats_used_for_gfu_boost) {
  // If frames_to_project is equal to num_stats_used_for_gfu_boost, the boost
  // was already calculated over frames_to_project frames (all stats required
  // were available), so return the original boost.
  if (num_stats_used_for_gfu_boost >= frames_to_project) return gfu_boost;

  const double min_boost_factor = sqrt(p_rc->baseline_gf_interval);
  // Projection factor for the full window (number of frames =
  // frames_to_project).
  const double tpl_factor = av1_get_gfu_boost_projection_factor(
      min_boost_factor, MAX_GFUBOOST_FACTOR, frames_to_project);
  // Projection factor for the window the boost was actually measured on.
  const double tpl_factor_num_stats = av1_get_gfu_boost_projection_factor(
      min_boost_factor, MAX_GFUBOOST_FACTOR, num_stats_used_for_gfu_boost);
  // Scale the measured boost by the ratio of the two factors.
  return (int)rint((tpl_factor * gfu_boost) / tpl_factor_num_stats);
}
#define GF_MAX_BOOST 90.0 #define GF_MIN_BOOST 50 #define MIN_DECAY_FACTOR 0.01 int av1_calc_arf_boost(const TWO_PASS *twopass, const TWO_PASS_FRAME *twopass_frame, const PRIMARY_RATE_CONTROL *p_rc, FRAME_INFO *frame_info, int offset, int f_frames, int b_frames, int *num_fpstats_used, int *num_fpstats_required, int project_gfu_boost) { int i;
GF_GROUP_STATS gf_stats;
init_gf_stats(&gf_stats); double boost_score = (double)NORMAL_BOOST; int arf_boost; int flash_detected = 0; if (num_fpstats_used) *num_fpstats_used = 0;
// Search forward from the proposed arf/next gf position. for (i = 0; i < f_frames; ++i) { const FIRSTPASS_STATS *this_frame =
read_frame_stats(twopass, twopass_frame, i + offset); if (this_frame == NULL) break;
// Update the motion related elements to the boost calculation.
accumulate_frame_motion_stats(this_frame, &gf_stats,
frame_info->frame_width,
frame_info->frame_height);
// We want to discount the flash frame itself and the recovery // frame that follows as both will have poor scores.
flash_detected = detect_flash(twopass, twopass_frame, i + offset) ||
detect_flash(twopass, twopass_frame, i + offset + 1);
// Accumulate the effect of prediction quality decay. if (!flash_detected) {
gf_stats.decay_accumulator *= get_prediction_decay_rate(this_frame);
gf_stats.decay_accumulator = gf_stats.decay_accumulator < MIN_DECAY_FACTOR
? MIN_DECAY_FACTOR
: gf_stats.decay_accumulator;
}
// Reset for backward looking loop.
boost_score = 0.0;
init_gf_stats(&gf_stats); // Search backward towards last gf position. for (i = -1; i >= -b_frames; --i) { const FIRSTPASS_STATS *this_frame =
read_frame_stats(twopass, twopass_frame, i + offset); if (this_frame == NULL) break;
// Update the motion related elements to the boost calculation.
accumulate_frame_motion_stats(this_frame, &gf_stats,
frame_info->frame_width,
frame_info->frame_height);
// We want to discount the flash frame itself and the recovery // frame that follows as both will have poor scores.
flash_detected = detect_flash(twopass, twopass_frame, i + offset) ||
detect_flash(twopass, twopass_frame, i + offset + 1);
// Calculate a section intra ratio used in setting max loop filter. staticint calculate_section_intra_ratio(const FIRSTPASS_STATS *begin, const FIRSTPASS_STATS *end, int section_length) { const FIRSTPASS_STATS *s = begin; double intra_error = 0.0; double coded_error = 0.0; int i = 0;
while (s < end && i < section_length) {
intra_error += s->intra_error;
coded_error += s->coded_error;
++s;
++i;
}
/*!\brief Calculates the bit target for this GF/ARF group
 *
 * \ingroup rate_control
 *
 * Calculates the total bits to allocate in this GF/ARF group.
 *
 * \param[in]    cpi           Top-level encoder structure
 * \param[in]    gf_group_err  Cumulative coded error score for the
 *                             frames making up this group.
 *
 * \return The target total number of bits for this GF/ARF group.
 */
static int64_t calculate_total_gf_group_bits(AV1_COMP *cpi,
                                             double gf_group_err) {
  const RATE_CONTROL *const rc = &cpi->rc;
  const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const TWO_PASS *const twopass = &cpi->ppi->twopass;
  const int max_bits = frame_max_bits(rc, &cpi->oxcf);

  // The group receives the share of the remaining KF-group bit budget that
  // matches its share of the remaining KF-group error.
  int64_t total_group_bits = 0;
  if (twopass->kf_group_bits > 0 && twopass->kf_group_error_left > 0) {
    total_group_bits = (int64_t)(twopass->kf_group_bits *
                                 (gf_group_err / twopass->kf_group_error_left));
  }

  // Clip based on user supplied data rate variability limit.
  const int64_t group_cap = (int64_t)max_bits * p_rc->baseline_gf_interval;
  if (total_group_bits > group_cap) total_group_bits = group_cap;

  return total_group_bits;
}
// Calculate the number of bits to assign to boosted frames in a group.
// Bug fix: `allocation_chunks` was declared but never computed before use in
// the final division — reading it was undefined behavior. Restore the
// computation (frame_count * 100 + boost) together with the overflow guard
// that rescales very large boost values.
static int calculate_boost_bits(int frame_count, int boost,
                                int64_t total_group_bits) {
  int allocation_chunks;

  // return 0 for invalid inputs (could arise e.g. through rounding errors)
  if (!boost || (total_group_bits <= 0)) return 0;

  // With no other frames in the group, all bits may go to the boosted frame.
  if (frame_count <= 0) return (int)(AOMMIN(total_group_bits, INT_MAX));

  // Each ordinary frame contributes 100 "chunks"; the boosted frame
  // contributes `boost` chunks.
  allocation_chunks = (frame_count * 100) + boost;

  // Prevent overflow in the multiply below for very large boost values.
  if (boost > 1023) {
    const int divisor = boost >> 10;
    boost /= divisor;
    allocation_chunks /= divisor;
  }

  // Calculate the number of extra bits for use in the boosted frame or frames.
  return AOMMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks),
                0);
}
// Calculate the boost factor based on the number of bits assigned, i.e. the
// inverse of calculate_boost_bits().
static int calculate_boost_factor(int frame_count, int bits,
                                  int64_t total_group_bits) {
  // Ratio of the boosted frame's bits to the average bits of the remaining
  // frames, scaled to the percent-style units used for boost values.
  const double non_boost_bits = (double)(total_group_bits - bits);
  return (int)(100.0 * frame_count * bits / non_boost_bits);
}
// Reduce the number of bits assigned to keyframe or arf if necessary, to // prevent bitrate spikes that may break level constraints. // frame_type: 0: keyframe; 1: arf. staticint adjust_boost_bits_for_target_level(const AV1_COMP *const cpi,
RATE_CONTROL *const rc, int bits_assigned,
int64_t group_bits, int frame_type) { const AV1_COMMON *const cm = &cpi->common; const SequenceHeader *const seq_params = cm->seq_params;
PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc; constint temporal_layer_id = cm->temporal_layer_id; constint spatial_layer_id = cm->spatial_layer_id; for (int index = 0; index < seq_params->operating_points_cnt_minus_1 + 1;
++index) { if (!is_in_operating_point(seq_params->operating_point_idc[index],
temporal_layer_id, spatial_layer_id)) { continue;
}
const AV1_LEVEL target_level =
cpi->ppi->level_params.target_seq_level_idx[index]; if (target_level >= SEQ_LEVELS) continue;
assert(is_valid_seq_level_idx(target_level));
constdouble level_bitrate_limit = av1_get_max_bitrate_for_level(
target_level, seq_params->tier[0], seq_params->profile); constint target_bits_per_frame =
(int)(level_bitrate_limit / cpi->framerate); if (frame_type == 0) { // Maximum bits for keyframe is 8 times the target_bits_per_frame. constint level_enforced_max_kf_bits = target_bits_per_frame * 8; if (bits_assigned > level_enforced_max_kf_bits) { constint frames = rc->frames_to_key - 1;
p_rc->kf_boost = calculate_boost_factor(
frames, level_enforced_max_kf_bits, group_bits);
bits_assigned =
calculate_boost_bits(frames, p_rc->kf_boost, group_bits);
}
} elseif (frame_type == 1) { // Maximum bits for arf is 4 times the target_bits_per_frame. constint level_enforced_max_arf_bits = target_bits_per_frame * 4; if (bits_assigned > level_enforced_max_arf_bits) {
p_rc->gfu_boost =
calculate_boost_factor(p_rc->baseline_gf_interval,
level_enforced_max_arf_bits, group_bits);
bits_assigned = calculate_boost_bits(p_rc->baseline_gf_interval,
p_rc->gfu_boost, group_bits);
}
} else {
assert(0);
}
}
return bits_assigned;
}
// Allocate bits to each frame in a GF / ARF group staticvoid allocate_gf_group_bits(GF_GROUP *gf_group,
PRIMARY_RATE_CONTROL *const p_rc,
RATE_CONTROL *const rc,
int64_t gf_group_bits, int gf_arf_bits, int key_frame, int use_arf) { staticconstdouble layer_fraction[MAX_ARF_LAYERS + 1] = { 1.0, 0.70, 0.55,
0.60, 0.60, 1.0,
1.0 };
int64_t total_group_bits = gf_group_bits; int base_frame_bits; constint gf_group_size = gf_group->size; int layer_frames[MAX_ARF_LAYERS + 1] = { 0 };
// For key frames the frame target rate is already set and it // is also the golden frame. // === [frame_index == 0] === int frame_index = !!key_frame;
// Subtract the extra bits set aside for ARF frames from the Group Total if (use_arf) total_group_bits -= gf_arf_bits;
// Check the number of frames in each layer in case we have a // non standard group length. int max_arf_layer = gf_group->max_layer_depth - 1; for (int idx = frame_index; idx < gf_group_size; ++idx) { if ((gf_group->update_type[idx] == ARF_UPDATE) ||
(gf_group->update_type[idx] == INTNL_ARF_UPDATE)) {
layer_frames[gf_group->layer_depth[idx]]++;
}
}
// Allocate extra bits to each ARF layer int i; int layer_extra_bits[MAX_ARF_LAYERS + 1] = { 0 };
assert(max_arf_layer <= MAX_ARF_LAYERS); for (i = 1; i <= max_arf_layer; ++i) { double fraction = (i == max_arf_layer) ? 1.0 : layer_fraction[i];
layer_extra_bits[i] =
(int)((gf_arf_bits * fraction) / AOMMAX(1, layer_frames[i]));
gf_arf_bits -= (int)(gf_arf_bits * fraction);
}
// Now combine ARF layer and baseline bits to give total bits for each frame. int arf_extra_bits; for (int idx = frame_index; idx < gf_group_size; ++idx) { switch (gf_group->update_type[idx]) { case ARF_UPDATE: case INTNL_ARF_UPDATE:
arf_extra_bits = layer_extra_bits[gf_group->layer_depth[idx]];
gf_group->bit_allocation[idx] =
(base_frame_bits > INT_MAX - arf_extra_bits)
? INT_MAX
: (base_frame_bits + arf_extra_bits); break; case INTNL_OVERLAY_UPDATE: case OVERLAY_UPDATE: gf_group->bit_allocation[idx] = 0; break; default: gf_group->bit_allocation[idx] = base_frame_bits; break;
}
}
// Set the frame following the current GOP to 0 bit allocation. For ARF // groups, this next frame will be overlay frame, which is the first frame // in the next GOP. For GF group, next GOP will overwrite the rate allocation. // Setting this frame to use 0 bit (of out the current GOP budget) will // simplify logics in reference frame management. if (gf_group_size < MAX_STATIC_GF_GROUP_LENGTH)
gf_group->bit_allocation[gf_group_size] = 0;
}
// Returns true if KF group and GF group both are almost completely static. staticinlineint is_almost_static(double gf_zero_motion, int kf_zero_motion, int is_lap_enabled) { if (is_lap_enabled) { /* * when LAP enabled kf_zero_motion is not reliable, so use strict * constraint on gf_zero_motion.
*/ return (gf_zero_motion >= 0.999);
} else { return (gf_zero_motion >= 0.995) &&
(kf_zero_motion >= STATIC_KF_GROUP_THRESH);
}
}
#define ARF_ABS_ZOOM_THRESH 4.4 staticinlineint detect_gf_cut(AV1_COMP *cpi, int frame_index, int cur_start, int flash_detected, int active_max_gf_interval, int active_min_gf_interval,
GF_GROUP_STATS *gf_stats) {
RATE_CONTROL *const rc = &cpi->rc;
TWO_PASS *const twopass = &cpi->ppi->twopass;
AV1_COMMON *const cm = &cpi->common; // Motion breakout threshold for loop below depends on image size. constdouble mv_ratio_accumulator_thresh = (cm->height + cm->width) / 4.0;
if (!flash_detected) { // Break clause to detect very still sections after motion. For example, // a static image after a fade or other transition.
// TODO(angiebird): This is a temporary change, we will avoid using // twopass_frame.stats_in in the follow-up CL int index = (int)(cpi->twopass_frame.stats_in -
twopass->stats_buf_ctx->stats_in_start); if (detect_transition_to_still(&twopass->firstpass_info, index,
rc->min_gf_interval, frame_index - cur_start,
5, gf_stats->loop_decay_rate,
gf_stats->last_loop_decay_rate)) { return 1;
}
}
// Some conditions to breakout after min interval. if (frame_index - cur_start >= active_min_gf_interval && // If possible don't break very close to a kf
(rc->frames_to_key - frame_index >= rc->min_gf_interval) &&
((frame_index - cur_start) & 0x01) && !flash_detected &&
(gf_stats->mv_ratio_accumulator > mv_ratio_accumulator_thresh ||
gf_stats->abs_mv_in_out_accumulator > ARF_ABS_ZOOM_THRESH)) { return 1;
}
// If almost totally static, we will not use the the max GF length later, // so we can continue for more frames. if (((frame_index - cur_start) >= active_max_gf_interval + 1) &&
!is_almost_static(gf_stats->zero_motion_accumulator,
twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled)) { return 1;
} return 0;
}
if (gop_length_decision_method == 2) { // GF group length is decided based on GF boost and tpl stats of ARFs from // base layer, (base+1) layer.
shorten_gf_interval =
(p_rc->gfu_boost <
p_rc->num_stats_used_for_gfu_boost * GF_MIN_BOOST * 1.4) &&
!av1_tpl_setup_stats(cpi, 3, frame_params);
} else { int do_complete_tpl = 1;
GF_GROUP *const gf_group = &cpi->ppi->gf_group; int is_temporal_filter_enabled =
(rc->frames_since_key > 0 && gf_group->arf_index > -1);
if (gop_length_decision_method == 1) { // Check if tpl stats of ARFs from base layer, (base+1) layer, // (base+2) layer can decide the GF group length. int gop_length_eval = av1_tpl_setup_stats(cpi, 2, frame_params);
if (do_complete_tpl) { // Decide GF group length based on complete tpl stats.
shorten_gf_interval = !av1_tpl_setup_stats(cpi, 1, frame_params); // Tpl stats is reused when the ARF is temporally filtered and GF // interval is not shortened. if (is_temporal_filter_enabled && !shorten_gf_interval) {
cpi->skip_tpl_setup_stats = 1; #if CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
assert(cpi->gf_frame_index == 0);
av1_vbr_rc_update_q_index_list(&cpi->vbr_rc_info, &cpi->ppi->tpl_data,
gf_group,
cpi->common.seq_params->bit_depth); #endif// CONFIG_BITRATE_ACCURACY
}
}
} return shorten_gf_interval;
}
#define MIN_SHRINK_LEN 6 // the minimum length of gf if we are shrinking #define SMOOTH_FILT_LEN 7 #define HALF_FILT_LEN (SMOOTH_FILT_LEN / 2) #define WINDOW_SIZE 7 #define HALF_WIN (WINDOW_SIZE / 2)
// Smooth filter intra_error and coded_error in firstpass stats. // If stats[i].is_flash==1, the ith element should not be used in the filtering. staticvoid smooth_filter_stats(const FIRSTPASS_STATS *stats, int start_idx, int last_idx, double *filt_intra_err, double *filt_coded_err) { // A 7-tap gaussian smooth filter staticconstdouble smooth_filt[SMOOTH_FILT_LEN] = { 0.006, 0.061, 0.242,
0.383, 0.242, 0.061,
0.006 }; int i, j; for (i = start_idx; i <= last_idx; i++) { double total_wt = 0; for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) { int idx = AOMMIN(AOMMAX(i + j, start_idx), last_idx); if (stats[idx].is_flash) continue;
filt_intra_err[i] +=
smooth_filt[j + HALF_FILT_LEN] * stats[idx].intra_error;
total_wt += smooth_filt[j + HALF_FILT_LEN];
} if (total_wt > 0.01) {
filt_intra_err[i] /= total_wt;
} else {
filt_intra_err[i] = stats[i].intra_error;
}
} for (i = start_idx; i <= last_idx; i++) { double total_wt = 0; for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) { int idx = AOMMIN(AOMMAX(i + j, start_idx), last_idx); // Coded error involves idx and idx - 1. if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
// Calculate gradient of `values` over [start, last]: central differences in
// the interior, one-sided differences at the two ends.
static void get_gradient(const double *values, int start, int last,
                         double *grad) {
  if (start == last) {
    // A single sample has no measurable slope.
    grad[start] = 0;
    return;
  }
  for (int i = start; i <= last; i++) {
    // Clamp the stencil to the valid range at the boundaries.
    const int prev = (i - 1 > start) ? (i - 1) : start;
    const int next = (i + 1 < last) ? (i + 1) : last;
    grad[i] = (values[next] - values[prev]) / (next - prev);
  }
}
staticint find_next_scenecut(const FIRSTPASS_STATS *const stats_start, int first, int last) { // Identify unstable areas caused by scenecuts. // Find the max and 2nd max coded error, and the average of the rest frames. // If there is only one frame that yields a huge coded error, it is likely a // scenecut. double this_ratio, max_prev_ratio, max_next_ratio, max_prev_coded,
max_next_coded;
if (last - first == 0) return -1;
for (int i = first; i <= last; i++) { if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash)) continue; double temp_intra = AOMMAX(stats_start[i].intra_error, 0.01);
this_ratio = stats_start[i].coded_error / temp_intra; // find the avg ratio in the preceding neighborhood
max_prev_ratio = 0;
max_prev_coded = 0; for (int j = AOMMAX(first, i - HALF_WIN); j < i; j++) { if (stats_start[j].is_flash || (j > 0 && stats_start[j - 1].is_flash)) continue;
temp_intra = AOMMAX(stats_start[j].intra_error, 0.01); double temp_ratio = stats_start[j].coded_error / temp_intra; if (temp_ratio > max_prev_ratio) {
max_prev_ratio = temp_ratio;
} if (stats_start[j].coded_error > max_prev_coded) {
max_prev_coded = stats_start[j].coded_error;
}
} // find the avg ratio in the following neighborhood
max_next_ratio = 0;
max_next_coded = 0; for (int j = i + 1; j <= AOMMIN(i + HALF_WIN, last); j++) { if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash)) continue;
temp_intra = AOMMAX(stats_start[j].intra_error, 0.01); double temp_ratio = stats_start[j].coded_error / temp_intra; if (temp_ratio > max_next_ratio) {
max_next_ratio = temp_ratio;
} if (stats_start[j].coded_error > max_next_coded) {
max_next_coded = stats_start[j].coded_error;
}
}
if (max_prev_ratio < 0.001 && max_next_ratio < 0.001) { // the ratios are very small, only check a small fixed threshold if (this_ratio < 0.02) continue;
} else { // check if this frame has a larger ratio than the neighborhood double max_sr = stats_start[i].sr_coded_error; if (i < last) max_sr = AOMMAX(max_sr, stats_start[i + 1].sr_coded_error); double max_sr_fr_ratio =
max_sr / AOMMAX(stats_start[i].coded_error, 0.01);
// Remove the region with index next_region. // parameter merge: 0: merge with previous; 1: merge with next; 2: // merge with both, take type from previous if possible // After removing, next_region will be the index of the next region. staticvoid remove_region(int merge, REGIONS *regions, int *num_regions, int *next_region) { int k = *next_region;
assert(k < *num_regions); if (*num_regions == 1) {
*num_regions = 0; return;
} if (k == 0) {
merge = 1;
} elseif (k == *num_regions - 1) {
merge = 0;
} int num_merge = (merge == 2) ? 2 : 1; switch (merge) { case 0:
regions[k - 1].last = regions[k].last;
*next_region = k; break; case 1:
regions[k + 1].start = regions[k].start;
*next_region = k + 1; break; case 2:
regions[k - 1].last = regions[k + 1].last;
*next_region = k; break; default: assert(0);
}
*num_regions -= num_merge; for (k = *next_region - (merge == 1); k < *num_regions; k++) {
regions[k] = regions[k + num_merge];
}
}
// Insert a region in the cur_region_idx. The start and last should both be in // the current region. After insertion, the cur_region_idx will point to the // last region that was splitted from the original region. staticvoid insert_region(int start, int last, REGION_TYPES type,
REGIONS *regions, int *num_regions, int *cur_region_idx) { int k = *cur_region_idx;
REGION_TYPES this_region_type = regions[k].type; int this_region_last = regions[k].last; int num_add = (start != regions[k].start) + (last != regions[k].last); // move the following regions further to the back for (int r = *num_regions - 1; r > k; r--) {
regions[r + num_add] = regions[r];
}
*num_regions += num_add; if (start > regions[k].start) {
regions[k].last = start - 1;
k++;
regions[k].start = start;
}
regions[k].type = type; if (last < this_region_last) {
regions[k].last = last;
k++;
regions[k].start = last + 1;
regions[k].last = this_region_last;
regions[k].type = this_region_type;
} else {
regions[k].last = this_region_last;
}
*cur_region_idx = k;
}
// NOTE(review): this chunk is text-mangled ("staticvoid" fused, statements
// collapsed onto single lines) and analyze_region is TRUNCATED here — the
// remaining loop body (presumably the accumulation of avg_intra_err,
// avg_coded_err and avg_cor_coeff) and the closing braces are missing from
// this view. Code left byte-identical; restore from upstream before building.
// Get the average of stats inside a region. staticvoid analyze_region(const FIRSTPASS_STATS *stats, int k,
REGIONS *regions) { int i;
regions[k].avg_cor_coeff = 0;
regions[k].avg_sr_fr_ratio = 0;
regions[k].avg_intra_err = 0;
regions[k].avg_coded_err = 0;
// For the very first region (k == 0) the first frame has no valid previous
// frame, so the second-ref term is skipped for it.
int check_first_sr = (k != 0);
for (i = regions[k].start; i <= regions[k].last; i++) { if (i > regions[k].start || check_first_sr) { double num_frames =
(double)(regions[k].last - regions[k].start + check_first_sr); double max_coded_error =
AOMMAX(stats[i].coded_error, stats[i - 1].coded_error); double this_ratio =
stats[i].sr_coded_error / AOMMAX(max_coded_error, 0.001);
regions[k].avg_sr_fr_ratio += this_ratio / num_frames;
}
// Calculate the regions stats of every region. staticvoid get_region_stats(const FIRSTPASS_STATS *stats, REGIONS *regions, int num_regions) { for (int k = 0; k < num_regions; k++) {
analyze_region(stats, k, regions);
}
}
// NOTE(review): this function is garbled — `cur_type` is used below but the
// code that derives it from the windowed mean/variance statistics (expected
// between the inner window loop and the "mark a new region" section) is
// missing from this view. Code left byte-identical; restore from upstream.
// Find tentative stable regions staticint find_stable_regions(const FIRSTPASS_STATS *stats, constdouble *grad_coded, int this_start, int this_last, REGIONS *regions) { int i, j, k = 0;
regions[k].start = this_start; for (i = this_start; i <= this_last; i++) { // Check mean and variance of stats in a window double mean_intra = 0.001, var_intra = 0.001; double mean_coded = 0.001, var_coded = 0.001; int count = 0; for (j = -HALF_WIN; j <= HALF_WIN; j++) { int idx = AOMMIN(AOMMAX(i + j, this_start), this_last); if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
mean_intra += stats[idx].intra_error;
var_intra += stats[idx].intra_error * stats[idx].intra_error;
mean_coded += stats[idx].coded_error;
var_coded += stats[idx].coded_error * stats[idx].coded_error;
count++;
}
// NOTE(review): the computation of `cur_type` from mean_*/var_*/count is
// missing here — TODO restore before use.
// mark a new region if type changes if (i == regions[k].start) { // first frame in the region
regions[k].type = cur_type;
} elseif (cur_type != regions[k].type) { // Append a new region
regions[k].last = i - 1;
regions[k + 1].start = i;
regions[k + 1].type = cur_type;
k++;
}
}
// Close the final region and return the number of regions found.
regions[k].last = this_last; return k + 1;
}
// Clean up regions that should be removed or merged. staticvoid cleanup_regions(REGIONS *regions, int *num_regions) { int k = 0; while (k < *num_regions) { if ((k > 0 && regions[k - 1].type == regions[k].type &&
regions[k].type != SCENECUT_REGION) ||
regions[k].last < regions[k].start) {
remove_region(0, regions, num_regions, &k);
} else {
k++;
}
}
}
// Remove regions that are of type and shorter than length. // Merge it with its neighboring regions. staticvoid remove_short_regions(REGIONS *regions, int *num_regions,
REGION_TYPES type, int length) { int k = 0; while (k < *num_regions && (*num_regions) > 1) { if ((regions[k].last - regions[k].start + 1 < length &&
regions[k].type == type)) { // merge current region with the previous and next regions
remove_region(2, regions, num_regions, &k);
} else {
k++;
}
}
cleanup_regions(regions, num_regions);
}
// NOTE(review): this function is TRUNCATED in this chunk — its closing brace
// (and possibly a trailing get_region_stats refresh) is missing after the
// final merge loop. Also, `count_grad` is tested but never modified in the
// visible code; the gradient-based checks were presumably lost in the same
// text mangling — TODO restore from upstream. Code left byte-identical.
staticvoid adjust_unstable_region_bounds(const FIRSTPASS_STATS *stats,
REGIONS *regions, int *num_regions) { int i, j, k; // Remove regions that are too short. Likely noise.
remove_short_regions(regions, num_regions, STABLE_REGION, HALF_WIN);
remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
get_region_stats(stats, regions, *num_regions);
// Adjust region boundaries. The thresholds are empirically obtained, but // overall the performance is not very sensitive to small changes to them. for (k = 0; k < *num_regions; k++) { if (regions[k].type == STABLE_REGION) continue; if (k > 0) { // Adjust previous boundary. // First find the average intra/coded error in the previous // neighborhood. double avg_intra_err = 0; constint starti = AOMMAX(regions[k - 1].last - WINDOW_SIZE + 1,
regions[k - 1].start + 1); constint lasti = regions[k - 1].last; int counti = 0; for (i = starti; i <= lasti; i++) {
avg_intra_err += stats[i].intra_error;
counti++;
} if (counti > 0) {
// Grow the previous stable region forward while frames still look
// stable (intra error close to the neighborhood average, small coded
// error, high correlation).
avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001); int count_coded = 0, count_grad = 0; for (j = lasti + 1; j <= regions[k].last; j++) { constint intra_close =
fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1; constint coded_small = stats[j].coded_error / avg_intra_err < 0.1; constint coeff_close = stats[j].cor_coeff > 0.995; if (!coeff_close || !coded_small) count_coded--; if (intra_close && count_coded >= 0 && count_grad >= 0) { // this frame probably belongs to the previous stable region
regions[k - 1].last = j;
regions[k].start = j + 1;
} else { break;
}
}
}
} // if k > 0 if (k < *num_regions - 1) { // Adjust next boundary. // First find the average intra/coded error in the next neighborhood. double avg_intra_err = 0; constint starti = regions[k + 1].start; constint lasti = AOMMIN(regions[k + 1].last - 1,
regions[k + 1].start + WINDOW_SIZE - 1); int counti = 0; for (i = starti; i <= lasti; i++) {
avg_intra_err += stats[i].intra_error;
counti++;
} if (counti > 0) {
// Grow the next stable region backward using the same stability tests;
// count_coded starts at 1 because the boundary frame's coded error is
// expected to be large even when the frame itself is stable.
avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001); // At the boundary, coded error is large, but still the frame is stable int count_coded = 1, count_grad = 1; for (j = starti - 1; j >= regions[k].start; j--) { constint intra_close =
fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1; constint coded_small =
stats[j + 1].coded_error / avg_intra_err < 0.1; constint coeff_close = stats[j].cor_coeff > 0.995; if (!coeff_close || !coded_small) count_coded--; if (intra_close && count_coded >= 0 && count_grad >= 0) { // this frame probably belongs to the next stable region
regions[k + 1].start = j;
regions[k].last = j - 1;
} else { break;
}
}
}
} // if k < *num_regions - 1
} // end of loop over all regions
// If a stable regions has higher error than neighboring high var regions, // or if the stable region has a lower average correlation, // then it should be merged with them
k = 0; while (k < *num_regions && (*num_regions) > 1) { if (regions[k].type == STABLE_REGION &&
(regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
((k > 0 && // previous regions
(regions[k].avg_coded_err > regions[k - 1].avg_coded_err * 1.01 ||
regions[k].avg_cor_coeff < regions[k - 1].avg_cor_coeff * 0.999)) &&
(k < *num_regions - 1 && // next region
(regions[k].avg_coded_err > regions[k + 1].avg_coded_err * 1.01 ||
regions[k].avg_cor_coeff < regions[k + 1].avg_cor_coeff * 0.999)))) { // merge current region with the previous and next regions
remove_region(2, regions, num_regions, &k);
analyze_region(stats, k - 1, regions);
} elseif (regions[k].type == HIGH_VAR_REGION &&
(regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
((k > 0 && // previous regions
(regions[k].avg_coded_err <
regions[k - 1].avg_coded_err * 0.99 ||
regions[k].avg_cor_coeff >
regions[k - 1].avg_cor_coeff * 1.001)) &&
(k < *num_regions - 1 && // next region
(regions[k].avg_coded_err <
regions[k + 1].avg_coded_err * 0.99 ||
regions[k].avg_cor_coeff >
regions[k + 1].avg_cor_coeff * 1.001)))) { // merge current region with the previous and next regions
remove_region(2, regions, num_regions, &k);
analyze_region(stats, k - 1, regions);
} else {
k++;
}
}
// NOTE(review): this function is TRUNCATED — the chunk cuts off mid-statement
// after the ratio_thres comparison below, and the remainder of the "dip"
// merging loop and the function's closing braces are missing. Code left
// byte-identical; restore from upstream before building.
// Identify blending regions. staticvoid find_blending_regions(const FIRSTPASS_STATS *stats,
REGIONS *regions, int *num_regions) { int i, k = 0; // Blending regions will have large content change, therefore will have a // large consistent change in intra error. int count_stable = 0; while (k < *num_regions) { if (regions[k].type == STABLE_REGION) {
k++;
count_stable++; continue;
} int dir = 0; int start = 0, last; for (i = regions[k].start; i <= regions[k].last; i++) { // First mark the regions that has consistent large change of intra error. if (k == 0 && i == regions[k].start) continue; if (stats[i].is_flash || (i > 0 && stats[i - 1].is_flash)) continue; double grad = stats[i].intra_error - stats[i - 1].intra_error; int large_change = fabs(grad) / AOMMAX(stats[i].intra_error, 0.01) > 0.05; int this_dir = 0; if (large_change) {
this_dir = (grad > 0) ? 1 : -1;
} // the current trend continues if (dir == this_dir) continue; if (dir != 0) { // Mark the end of a new large change group and add it
last = i - 1;
insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
}
dir = this_dir; if (k == 0 && i == regions[k].start + 1) {
start = i - 1;
} else {
start = i;
}
// Close out a trailing run of consistent change at the region's end.
} if (dir != 0) {
last = regions[k].last;
insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
}
k++;
}
// If the blending region has very low correlation, mark it as high variance // since we probably cannot benefit from it anyways.
get_region_stats(stats, regions, *num_regions); for (k = 0; k < *num_regions; k++) { if (regions[k].type != BLENDING_REGION) continue; if (regions[k].last == regions[k].start || regions[k].avg_cor_coeff < 0.6 ||
count_stable == 0)
regions[k].type = HIGH_VAR_REGION;
}
get_region_stats(stats, regions, *num_regions);
// It is possible for blending to result in a "dip" in intra error (first // decrease then increase). Therefore we need to find the dip and combine the // two regions.
k = 1; while (k < *num_regions) { if (k < *num_regions - 1 && regions[k].type == HIGH_VAR_REGION) { // Check if this short high variance regions is actually in the middle of // a blending region. if (regions[k - 1].type == BLENDING_REGION &&
regions[k + 1].type == BLENDING_REGION &&
regions[k].last - regions[k].start < 3) { int prev_dir = (stats[regions[k - 1].last].intra_error -
stats[regions[k - 1].last - 1].intra_error) > 0
? 1
: -1; int next_dir = (stats[regions[k + 1].last].intra_error -
stats[regions[k + 1].last - 1].intra_error) > 0
? 1
: -1; if (prev_dir < 0 && next_dir > 0) { // This is possibly a mid region of blending. Check the ratios double ratio_thres = AOMMIN(regions[k - 1].avg_sr_fr_ratio,
regions[k + 1].avg_sr_fr_ratio) *
0.95; if (regions[k].avg_sr_fr_ratio > ratio_thres) {
// NOTE(review): removed trailing non-code text (a German website disclaimer
// captured during extraction). The remainder of find_blending_regions is
// missing from this chunk and must be restored from the upstream source.