/*
* Copyright (c) 2016, Alliance for Open Media. All rights reserved.
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <limits.h>
#include <float.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
#include "config/av1_rtcd.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/binary_codes_writer.h"
#include "aom_ports/mem.h"
#include "aom_ports/aom_timer.h"
#include "aom_util/aom_pthread.h"
#if CONFIG_MISMATCH_DEBUG
#include "aom_util/debug_util.h"
#endif // CONFIG_MISMATCH_DEBUG
#include "av1/common/cfl.h"
#include "av1/common/common.h"
#include "av1/common/common_data.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymode.h"
#include "av1/common/idct.h"
#include "av1/common/mv.h"
#include "av1/common/mvref_common.h"
#include "av1/common/pred_common.h"
#include "av1/common/quant_common.h"
#include "av1/common/reconintra.h"
#include "av1/common/reconinter.h"
#include "av1/common/seg_common.h"
#include "av1/common/tile_common.h"
#include "av1/common/warped_motion.h"
#include "av1/encoder/allintra_vis.h"
#include "av1/encoder/aq_complexity.h"
#include "av1/encoder/aq_cyclicrefresh.h"
#include "av1/encoder/aq_variance.h"
#include "av1/encoder/global_motion_facade.h"
#include "av1/encoder/encodeframe.h"
#include "av1/encoder/encodeframe_utils.h"
#include "av1/encoder/encodemb.h"
#include "av1/encoder/encodemv.h"
#include "av1/encoder/encodetxb.h"
#include "av1/encoder/ethread.h"
#include "av1/encoder/extend.h"
#include "av1/encoder/intra_mode_search_utils.h"
#include "av1/encoder/ml.h"
#include "av1/encoder/motion_search_facade.h"
#include "av1/encoder/partition_strategy.h"
#if !CONFIG_REALTIME_ONLY
#include "av1/encoder/partition_model_weights.h"
#endif
#include "av1/encoder/partition_search.h"
#include "av1/encoder/rd.h"
#include "av1/encoder/rdopt.h"
#include "av1/encoder/reconinter_enc.h"
#include "av1/encoder/segmentation.h"
#include "av1/encoder/tokenize.h"
#include "av1/encoder/tpl_model.h"
#include "av1/encoder/var_based_part.h"
#if CONFIG_TUNE_VMAF
#include "av1/encoder/tune_vmaf.h"
#endif
/*!\cond */
// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
// All 128 entries equal 128, the 8-bit mid-grey value; consumed via
// get_var_offs() when computing variance against a flat reference.
static const uint8_t AV1_VAR_OFFS[MAX_SB_SIZE] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};
#if CONFIG_AV1_HIGHBITDEPTH
// High-bit-depth flat reference blocks: mid-grey scaled to the working bit
// depth (128 at 8-bit, 128*4 at 10-bit, 128*16 at 12-bit), stored as
// uint16_t and selected by get_var_offs() via CONVERT_TO_BYTEPTR.
static const uint16_t AV1_HIGH_VAR_OFFS_8[MAX_SB_SIZE] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};

// 10-bit variant: 128 << 2.
static const uint16_t AV1_HIGH_VAR_OFFS_10[MAX_SB_SIZE] = {
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4
};

// 12-bit variant: 128 << 4.
static const uint16_t AV1_HIGH_VAR_OFFS_12[MAX_SB_SIZE] = {
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16
};
#endif  // CONFIG_AV1_HIGHBITDEPTH
/*!\endcond */
// Returns the flat (mid-grey) reference block matching the given bit depth.
// The result feeds the source-variance computation used to drive adaptive
// quantization. For high-bit-depth buffers the uint16_t table is returned
// through CONVERT_TO_BYTEPTR, matching the variance functions' convention.
static const uint8_t *get_var_offs(int use_hbd, int bd) {
#if CONFIG_AV1_HIGHBITDEPTH
  if (use_hbd) {
    static const uint16_t *const kHighVarOffs[3] = { AV1_HIGH_VAR_OFFS_8,
                                                     AV1_HIGH_VAR_OFFS_10,
                                                     AV1_HIGH_VAR_OFFS_12 };
    assert(bd == 8 || bd == 10 || bd == 12);
    // Map bd in {8, 10, 12} onto table index {0, 1, 2}.
    return CONVERT_TO_BYTEPTR(kHighVarOffs[(bd - 8) >> 1]);
  }
#else
  (void)use_hbd;
  (void)bd;
  assert(!use_hbd);
#endif
  assert(bd == 8);
  return AV1_VAR_OFFS;
}
// Resets the per-macroblock RTC statistics gathered during encoding.
void av1_init_rtc_counters(MACROBLOCK *const x) {
  x->cnt_zeromv = 0;
  av1_init_cyclic_refresh_counters(x);
}
// Folds a macroblock's RTC statistics into the encoder-level totals.
void av1_accumulate_rtc_counters(AV1_COMP *cpi, const MACROBLOCK *const x) {
  cpi->rc.cnt_zeromv += x->cnt_zeromv;
  // Cyclic-refresh counters are only meaningful under that AQ mode.
  if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ)
    av1_accumulate_cyclic_refresh_counters(cpi->cyclic_refresh, x);
}
// Computes the per-pixel variance of `ref` for the given plane and block
// size, measured against a flat mid-grey block for the current bit depth.
unsigned int av1_get_perpixel_variance(const AV1_COMP *cpi,
                                       const MACROBLOCKD *xd,
                                       const struct buf_2d *ref,
                                       BLOCK_SIZE bsize, int plane,
                                       int use_hbd) {
  const BLOCK_SIZE plane_bsize = get_plane_block_size(
      bsize, xd->plane[plane].subsampling_x, xd->plane[plane].subsampling_y);
  const uint8_t *const flat_ref = get_var_offs(use_hbd, xd->bd);
  unsigned int sse;
  const unsigned int var =
      cpi->ppi->fn_ptr[plane_bsize].vf(ref->buf, ref->stride, flat_ref, 0,
                                       &sse);
  // Normalize by the plane block's pixel count (power of two -> shift).
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[plane_bsize]);
}
// Convenience wrapper around av1_get_perpixel_variance() that derives the
// high-bit-depth flag from the current buffer.
unsigned int av1_get_perpixel_variance_facade(const AV1_COMP *cpi,
                                              const MACROBLOCKD *xd,
                                              const struct buf_2d *ref,
                                              BLOCK_SIZE bsize, int plane) {
  return av1_get_perpixel_variance(cpi, xd, ref, bsize, plane,
                                   is_cur_buf_hbd(xd));
}
// Points each of x's source-plane buffers at the (mi_row, mi_col) block of
// the given source frame, honoring per-plane subsampling.
void av1_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col, const int num_planes,
                          BLOCK_SIZE bsize) {
  // Remember the current source frame on the encoder side.
  x->e_mbd.cur_buf = src;

  // Clamp to MAX_MB_PLANE purely to quiet static-analysis warnings.
  const int planes = AOMMIN(num_planes, MAX_MB_PLANE);
  for (int plane = 0; plane < planes; ++plane) {
    const int is_uv = plane > 0;
    setup_pred_plane(&x->plane[plane].src, bsize, src->buffers[plane],
                     src->crop_widths[is_uv], src->crop_heights[is_uv],
                     src->strides[is_uv], mi_row, mi_col, NULL,
                     x->e_mbd.plane[plane].subsampling_x,
                     x->e_mbd.plane[plane].subsampling_y);
  }
}
#if !CONFIG_REALTIME_ONLY
/*!\brief Assigns different quantization parameters to each super
 * block based on its TPL weight.
 *
 * \ingroup tpl_modelling
 *
 * \param[in]     cpi        Top level encoder instance structure
 * \param[in,out] td         Thread data structure
 * \param[in,out] x          Macro block level data for this block.
 * \param[in]     tile_info  Tile information / identification
 * \param[in]     mi_row     Block row (in "MI_SIZE" units) index
 * \param[in]     mi_col     Block column (in "MI_SIZE" units) index
 * \param[in]     num_planes Number of image planes (e.g. Y,U,V)
 *
 * \remark No return value but updates macroblock and thread data
 * related to the q / q delta to be used.
 */
static inline void setup_delta_q(AV1_COMP *const cpi, ThreadData *td,
                                 MACROBLOCK *const x,
                                 const TileInfo *const tile_info, int mi_row,
                                 int mi_col, int num_planes) {
  AV1_COMMON *const cm = &cpi->common;
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
  const DeltaQInfo *const delta_q_info = &cm->delta_q_info;
  assert(delta_q_info->delta_q_present_flag);
  const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
  // Delta-q modulation based on variance
  av1_setup_src_planes(x, cpi->source, mi_row, mi_col, num_planes, sb_size);

  const int delta_q_res = delta_q_info->delta_q_res;
  int current_qindex = cm->quant_params.base_qindex;
  // Choose the superblock qindex according to the active delta-q mode.
  // The branches are mutually exclusive; the first match wins.
  if (cpi->use_ducky_encode && cpi->ducky_encode_info.frame_info.qp_mode ==
                                   DUCKY_ENCODE_FRAME_MODE_QINDEX) {
    // Externally driven ("ducky encode") mode: the qindex for each
    // superblock is looked up in a caller-supplied table.
    const int sb_row = mi_row >> cm->seq_params->mib_size_log2;
    const int sb_col = mi_col >> cm->seq_params->mib_size_log2;
    const int sb_cols =
        CEIL_POWER_OF_TWO(cm->mi_params.mi_cols, cm->seq_params->mib_size_log2);
    const int sb_index = sb_row * sb_cols + sb_col;
    current_qindex =
        cpi->ducky_encode_info.frame_info.superblock_encode_qindex[sb_index];
  } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_PERCEPTUAL) {
    if (DELTA_Q_PERCEPTUAL_MODULATION == 1) {
      // Perceptual modulation from the block's wavelet energy level.
      const int block_wavelet_energy_level =
          av1_block_wavelet_energy_level(cpi, x, sb_size);
      x->sb_energy_level = block_wavelet_energy_level;
      current_qindex = av1_compute_q_from_energy_level_deltaq_mode(
          cpi, block_wavelet_energy_level);
    } else {
      // Perceptual modulation from the block's (log) variance.
      const int block_var_level = av1_log_block_var(cpi, x, sb_size);
      x->sb_energy_level = block_var_level;
      current_qindex =
          av1_compute_q_from_energy_level_deltaq_mode(cpi, block_var_level);
    }
  } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_OBJECTIVE &&
             cpi->oxcf.algo_cfg.enable_tpl_model) {
    // Setup deltaq based on tpl stats
    current_qindex =
        av1_get_q_for_deltaq_objective(cpi, td, NULL, sb_size, mi_row, mi_col);
  } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_PERCEPTUAL_AI) {
    current_qindex = av1_get_sbq_perceptual_ai(cpi, sb_size, mi_row, mi_col);
  } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_USER_RATING_BASED) {
    current_qindex = av1_get_sbq_user_rating_based(cpi, mi_row, mi_col);
  } else if (cpi->oxcf.q_cfg.enable_hdr_deltaq) {
    current_qindex = av1_get_q_for_hdr(cpi, x, sb_size, mi_row, mi_col);
  } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_VARIANCE_BOOST) {
    current_qindex = av1_get_sbq_variance_boost(cpi, x);
  }

  x->rdmult_cur_qindex = current_qindex;
  MACROBLOCKD *const xd = &x->e_mbd;
  // Snap the chosen qindex to the configured delta-q resolution, relative
  // to the running base qindex.
  const int adjusted_qindex = av1_adjust_q_from_delta_q_res(
      delta_q_res, xd->current_base_qindex, current_qindex);
  if (cpi->use_ducky_encode) {
    // Externally supplied qindexes are expected to already be aligned.
    assert(adjusted_qindex == current_qindex);
  }
  current_qindex = adjusted_qindex;

  x->delta_qindex = current_qindex - cm->quant_params.base_qindex;
  x->rdmult_delta_qindex = x->delta_qindex;

  av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
  xd->mi[0]->current_qindex = current_qindex;
  av1_init_plane_quantizers(cpi, x, xd->mi[0]->segment_id, 0);

  // keep track of any non-zero delta-q used
  td->deltaq_used |= (x->delta_qindex != 0);

  if (cpi->oxcf.tool_cfg.enable_deltalf_mode) {
    // Derive a loop-filter delta from the q delta (delta_q / 4, rounded to
    // the delta-lf resolution) and clamp to the legal loop-filter range.
    const int delta_lf_res = delta_q_info->delta_lf_res;
    const int lfmask = ~(delta_lf_res - 1);
    const int delta_lf_from_base =
        ((x->delta_qindex / 4 + delta_lf_res / 2) & lfmask);
    const int8_t delta_lf =
        (int8_t)clamp(delta_lf_from_base, -MAX_LOOP_FILTER, MAX_LOOP_FILTER);
    const int frame_lf_count =
        av1_num_planes(cm) > 1 ? FRAME_LF_COUNT : FRAME_LF_COUNT - 2;
    const int mib_size = cm->seq_params->mib_size;

    // pre-set the delta lf for loop filter. Note that this value is set
    // before mi is assigned for each block in current superblock
    for (int j = 0; j < AOMMIN(mib_size, mi_params->mi_rows - mi_row); j++) {
      for (int k = 0; k < AOMMIN(mib_size, mi_params->mi_cols - mi_col); k++) {
        const int grid_idx = get_mi_grid_idx(mi_params, mi_row + j, mi_col + k);
        mi_params->mi_alloc[grid_idx].delta_lf_from_base = delta_lf;
        for (int lf_id = 0; lf_id < frame_lf_count; ++lf_id) {
          mi_params->mi_alloc[grid_idx].delta_lf[lf_id] = delta_lf;
        }
      }
    }
  }
}
// Builds x->tpl_keep_ref_frame[] for the superblock at (mi_row, mi_col):
// ranks the inter reference frames by accumulated TPL prediction-error
// reduction and prunes the least useful ones from the rd search. Leaves the
// mask all-zero (no pruning) when TPL stats are unavailable or inapplicable.
static void init_ref_frame_space(AV1_COMP *cpi, ThreadData *td, int mi_row,
                                 int mi_col) {
  const AV1_COMMON *cm = &cpi->common;
  const GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
  MACROBLOCK *x = &td->mb;
  const int frame_idx = cpi->gf_frame_index;
  TplParams *const tpl_data = &cpi->ppi->tpl_data;
  const uint8_t block_mis_log2 = tpl_data->tpl_stats_block_mis_log2;

  av1_zero(x->tpl_keep_ref_frame);

  // Pruning needs valid TPL stats for a TPL-eligible frame with AQ off.
  if (!av1_tpl_stats_ready(tpl_data, frame_idx)) return;
  if (!is_frame_tpl_eligible(gf_group, cpi->gf_frame_index)) return;
  if (cpi->oxcf.q_cfg.aq_mode != NO_AQ) return;

  const int is_overlay =
      cpi->ppi->gf_group.update_type[frame_idx] == OVERLAY_UPDATE;
  if (is_overlay) {
    // Overlay frames keep every reference frame.
    memset(x->tpl_keep_ref_frame, 1, sizeof(x->tpl_keep_ref_frame));
    return;
  }

  TplDepFrame *tpl_frame = &tpl_data->tpl_frame[frame_idx];
  TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
  const int tpl_stride = tpl_frame->stride;
  int64_t inter_cost[INTER_REFS_PER_FRAME] = { 0 };
  const int step = 1 << block_mis_log2;
  const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
  const int mi_row_end =
      AOMMIN(mi_size_high[sb_size] + mi_row, mi_params->mi_rows);
  // TPL stats live at superres (source) resolution; convert the coded
  // column coordinates before indexing into them.
  const int mi_cols_sr = av1_pixels_to_mi(cm->superres_upscaled_width);
  const int mi_col_sr =
      coded_to_superres_mi(mi_col, cm->superres_scale_denominator);
  const int mi_col_end_sr =
      AOMMIN(coded_to_superres_mi(mi_col + mi_size_wide[sb_size],
                                  cm->superres_scale_denominator),
             mi_cols_sr);
  const int row_step = step;
  const int col_step_sr =
      coded_to_superres_mi(step, cm->superres_scale_denominator);
  // Accumulate, per reference, the prediction-error reduction contributed
  // by the TPL blocks where that reference is the winner.
  for (int row = mi_row; row < mi_row_end; row += row_step) {
    for (int col = mi_col_sr; col < mi_col_end_sr; col += col_step_sr) {
      const TplDepStats *this_stats =
          &tpl_stats[av1_tpl_ptr_pos(row, col, tpl_stride, block_mis_log2)];
      int64_t tpl_pred_error[INTER_REFS_PER_FRAME] = { 0 };
      // Find the winner ref frame idx for the current block
      int64_t best_inter_cost = this_stats->pred_error[0];
      int best_rf_idx = 0;
      for (int idx = 1; idx < INTER_REFS_PER_FRAME; ++idx) {
        if ((this_stats->pred_error[idx] < best_inter_cost) &&
            (this_stats->pred_error[idx] != 0)) {
          best_inter_cost = this_stats->pred_error[idx];
          best_rf_idx = idx;
        }
      }

      // tpl_pred_error is the pred_error reduction of best_ref w.r.t.
      // LAST_FRAME.
      tpl_pred_error[best_rf_idx] = this_stats->pred_error[best_rf_idx] -
                                    this_stats->pred_error[LAST_FRAME - 1];

      for (int rf_idx = 1; rf_idx < INTER_REFS_PER_FRAME; ++rf_idx)
        inter_cost[rf_idx] += tpl_pred_error[rf_idx];
    }
  }

  // Insertion-sort the non-LAST references by ascending accumulated cost
  // (more negative == larger error reduction == more useful reference).
  int rank_index[INTER_REFS_PER_FRAME - 1];
  for (int idx = 0; idx < INTER_REFS_PER_FRAME - 1; ++idx) {
    rank_index[idx] = idx + 1;
    for (int i = idx; i > 0; --i) {
      if (inter_cost[rank_index[i - 1]] > inter_cost[rank_index[i]]) {
        const int tmp = rank_index[i - 1];
        rank_index[i - 1] = rank_index[i];
        rank_index[i] = tmp;
      }
    }
  }

  // INTRA and LAST are always kept.
  x->tpl_keep_ref_frame[INTRA_FRAME] = 1;
  x->tpl_keep_ref_frame[LAST_FRAME] = 1;

  int cutoff_ref = 0;
  for (int idx = 0; idx < INTER_REFS_PER_FRAME - 1; ++idx) {
    x->tpl_keep_ref_frame[rank_index[idx] + LAST_FRAME] = 1;
    if (idx > 2) {
      if (!cutoff_ref) {
        // If the predictive coding gains are smaller than the previous more
        // relevant frame over certain amount, discard this frame and all the
        // frames afterwards.
        if (llabs(inter_cost[rank_index[idx]]) <
                llabs(inter_cost[rank_index[idx - 1]]) / 8 ||
            inter_cost[rank_index[idx]] == 0)
          cutoff_ref = 1;
      }

      if (cutoff_ref) x->tpl_keep_ref_frame[rank_index[idx] + LAST_FRAME] = 0;
    }
  }
}
// Adjusts x->rdmult from the TPL model for ARF frames, but only when no
// adaptive-quantization or delta-q scheme is active (those already modulate
// the rd multiplier themselves).
static inline void adjust_rdmult_tpl_model(AV1_COMP *cpi, MACROBLOCK *x,
                                           int mi_row, int mi_col) {
  assert(IMPLIES(cpi->ppi->gf_group.size > 0,
                 cpi->gf_frame_index < cpi->ppi->gf_group.size));
  const int gf_index = cpi->gf_frame_index;
  const bool apply_tpl_delta =
      cpi->oxcf.algo_cfg.enable_tpl_model &&
      cpi->oxcf.q_cfg.aq_mode == NO_AQ &&
      cpi->oxcf.q_cfg.deltaq_mode == NO_DELTA_Q && gf_index > 0 &&
      cpi->ppi->gf_group.update_type[gf_index] == ARF_UPDATE;
  if (apply_tpl_delta) {
    const BLOCK_SIZE sb_size = cpi->common.seq_params->sb_size;
    x->rdmult =
        av1_get_rdmult_delta(cpi, sb_size, mi_row, mi_col, cpi->rd.RDMULT);
  }
}
#endif // !CONFIG_REALTIME_ONLY
#if CONFIG_RT_ML_PARTITIONING
// Get a prediction(stored in x->est_pred) for the whole superblock.
static void get_estimated_pred(AV1_COMP *cpi, const TileInfo *const tile,
                               MACROBLOCK *x, int mi_row, int mi_col) {
  AV1_COMMON *const cm = &cpi->common;
  const int is_key_frame = frame_is_intra_only(cm);
  MACROBLOCKD *xd = &x->e_mbd;

  // TODO(kyslov) Extend to 128x128
  assert(cm->seq_params->sb_size == BLOCK_64X64);

  av1_set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);

  if (!is_key_frame) {
    // Inter frame: build a simple zero-MV bilinear prediction of the whole
    // 64x64 luma block from LAST_FRAME directly into x->est_pred.
    MB_MODE_INFO *mi = xd->mi[0];
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_yv12_buf(cm, LAST_FRAME);

    assert(yv12 != NULL);

    av1_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                         get_ref_scale_factors(cm, LAST_FRAME), 1);
    mi->ref_frame[0] = LAST_FRAME;
    mi->ref_frame[1] = NONE;
    mi->bsize = BLOCK_64X64;
    mi->mv[0].as_int = 0;
    mi->interp_filters = av1_broadcast_interp_filter(BILINEAR);

    set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);

    // Redirect the luma destination to the estimate buffer for this build.
    xd->plane[0].dst.buf = x->est_pred;
    xd->plane[0].dst.stride = 64;
    av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
  } else {
    // Key frame: no reference available, fill the estimate with mid-grey.
#if CONFIG_AV1_HIGHBITDEPTH
    switch (xd->bd) {
      case 8: memset(x->est_pred, 128, 64 * 64 * sizeof(x->est_pred[0])); break;
      case 10:
        // NOTE(review): memset converts its fill value to unsigned char, so
        // 128 * 4 (= 512) truncates to 0 here -- this cannot produce the
        // 10-bit mid-grey per element. An element-wise 16-bit fill would;
        // confirm x->est_pred's element type and intended behavior.
        memset(x->est_pred, 128 * 4, 64 * 64 * sizeof(x->est_pred[0]));
        break;
      case 12:
        // NOTE(review): same truncation concern as above (128 * 16 -> 0).
        memset(x->est_pred, 128 * 16, 64 * 64 * sizeof(x->est_pred[0]));
        break;
    }
#else
    memset(x->est_pred, 128, 64 * 64 * sizeof(x->est_pred[0]));
#endif  // CONFIG_AV1_HIGHBITDEPTH
  }
}
#endif // CONFIG_RT_ML_PARTITIONING
// Weights used when averaging CDFs from neighboring superblocks.
#define AVG_CDF_WEIGHT_LEFT 3
#define AVG_CDF_WEIGHT_TOP_RIGHT 1

/*!\brief Encode a superblock (minimal RD search involved)
 *
 * \ingroup partition_search
 * Encodes the superblock by a pre-determined partition pattern, only minor
 * rd-based searches are allowed to adjust the initial pattern. It is only used
 * by realtime encoding.
 */
static inline void encode_nonrd_sb(AV1_COMP *cpi, ThreadData *td,
                                   TileDataEnc *tile_data, TokenExtra **tp,
                                   const int mi_row, const int mi_col,
                                   const int seg_skip) {
  AV1_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  const SPEED_FEATURES *const sf = &cpi->sf;
  const TileInfo *const tile_info = &tile_data->tile_info;
  MB_MODE_INFO **mi = cm->mi_params.mi_grid_base +
                      get_mi_grid_idx(&cm->mi_params, mi_row, mi_col);
  const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
  PC_TREE *const pc_root = td->pc_root;

#if CONFIG_RT_ML_PARTITIONING
  if (sf->part_sf.partition_search_type == ML_BASED_PARTITION) {
    // ML-driven partitioning works from an estimated prediction of the SB.
    RD_STATS dummy_rdc;
    get_estimated_pred(cpi, tile_info, x, mi_row, mi_col);
    av1_nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
                             BLOCK_64X64, &dummy_rdc, 1, INT64_MAX, pc_root);
    return;
  }
#endif
  // Set the partition
  if (sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip ||
      (sf->rt_sf.use_fast_fixed_part && x->sb_force_fixed_part == 1 &&
       (!frame_is_intra_only(cm) &&
        (!cpi->ppi->use_svc ||
         !cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)))) {
    // set a fixed-size partition
    av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
    BLOCK_SIZE bsize_select = sf->part_sf.fixed_partition_size;
    // For very static content use one whole-superblock partition.
    if (sf->rt_sf.use_fast_fixed_part &&
        x->content_state_sb.source_sad_nonrd < kLowSad) {
      bsize_select = cm->seq_params->sb_size;
    }
    // Non-reference frames during a detected slide change are coded as a
    // single whole-SB zero-MV skip block.
    if (cpi->sf.rt_sf.skip_encoding_non_reference_slide_change &&
        cpi->rc.high_source_sad && cpi->ppi->rtc_ref.non_reference_frame) {
      bsize_select = cm->seq_params->sb_size;
      x->force_zeromv_skip_for_sb = 1;
    }
    const BLOCK_SIZE bsize = seg_skip ? sb_size : bsize_select;
    if (x->content_state_sb.source_sad_nonrd > kZeroSad)
      x->force_color_check_block_level = 1;
    av1_set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
  } else if (sf->part_sf.partition_search_type == VAR_BASED_PARTITION) {
    // set a variance-based partition
    av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
    av1_choose_var_based_partitioning(cpi, tile_info, td, x, mi_row, mi_col);
  }
  assert(sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip ||
         sf->part_sf.partition_search_type == VAR_BASED_PARTITION);
  set_cb_offsets(td->mb.cb_offset, 0, 0);

  // Initialize the flag to skip cdef to 1.
  if (sf->rt_sf.skip_cdef_sb) {
    const int block64_in_sb = (sb_size == BLOCK_128X128) ? 2 : 1;
    // If 128x128 block is used, we need to set the flag for all 4 64x64 sub
    // "blocks".
    for (int r = 0; r < block64_in_sb; ++r) {
      for (int c = 0; c < block64_in_sb; ++c) {
        const int idx_in_sb =
            r * MI_SIZE_64X64 * cm->mi_params.mi_stride + c * MI_SIZE_64X64;
        if (mi[idx_in_sb]) mi[idx_in_sb]->cdef_strength = 1;
      }
    }
  }

#if CONFIG_COLLECT_COMPONENT_TIMING
  start_timing(cpi, nonrd_use_partition_time);
#endif
  // Encode the superblock with the partition set up above.
  av1_nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, sb_size,
                          pc_root);
#if CONFIG_COLLECT_COMPONENT_TIMING
  end_timing(cpi, nonrd_use_partition_time);
#endif
}
// This function initializes the stats for encode_rd_sb: sets up
// simple-motion-search MVs, per-SB delta-q / TPL state (non-realtime only),
// and resets the per-superblock search bookkeeping before the rd search.
static inline void init_encode_rd_sb(AV1_COMP *cpi, ThreadData *td,
                                     const TileDataEnc *tile_data,
                                     SIMPLE_MOTION_DATA_TREE *sms_root,
                                     RD_STATS *rd_cost, int mi_row, int mi_col,
                                     int gather_tpl_data) {
  const AV1_COMMON *cm = &cpi->common;
  const TileInfo *tile_info = &tile_data->tile_info;
  MACROBLOCK *x = &td->mb;

  const SPEED_FEATURES *sf = &cpi->sf;
  // Simple-motion-search MVs are only needed when a speed feature that
  // consumes them is enabled, and never on intra-only frames.
  const int use_simple_motion_search =
      (sf->part_sf.simple_motion_search_split ||
       sf->part_sf.simple_motion_search_prune_rect ||
       sf->part_sf.simple_motion_search_early_term_none ||
       sf->part_sf.ml_early_term_after_part_split_level) &&
      !frame_is_intra_only(cm);
  if (use_simple_motion_search) {
    av1_init_simple_motion_search_mvs_for_sb(cpi, tile_info, x, sms_root,
                                             mi_row, mi_col);
  }

#if !CONFIG_REALTIME_ONLY
  // Skip the stats setup only for one-pass realtime encoding with no lag.
  if (!(has_no_stats_stage(cpi) && cpi->oxcf.mode == REALTIME &&
        cpi->oxcf.gf_cfg.lag_in_frames == 0)) {
    init_ref_frame_space(cpi, td, mi_row, mi_col);
    x->sb_energy_level = 0;
    x->part_search_info.cnn_output_valid = 0;
    if (gather_tpl_data) {
      if (cm->delta_q_info.delta_q_present_flag) {
        const int num_planes = av1_num_planes(cm);
        const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
        setup_delta_q(cpi, td, x, tile_info, mi_row, mi_col, num_planes);
        av1_tpl_rdmult_setup_sb(cpi, x, sb_size, mi_row, mi_col);
      }

      // TODO(jingning): revisit this function.
      // Deliberately disabled via "&& (0)"; kept for future tuning.
      if (cpi->oxcf.algo_cfg.enable_tpl_model && (0)) {
        adjust_rdmult_tpl_model(cpi, x, mi_row, mi_col);
      }
    }
  }
#else
  (void)tile_info;
  (void)mi_row;
  (void)mi_col;
  (void)gather_tpl_data;
#endif

  // Reset per-superblock search state.
  x->reuse_inter_pred = false;
  x->txfm_search_params.mode_eval_type = DEFAULT_EVAL;
  reset_mb_rd_record(x->txfm_search_info.mb_rd_record);
  av1_zero(x->picked_ref_frames_mask);
  av1_invalid_rd_stats(rd_cost);
}
#if !CONFIG_REALTIME_ONLY
// Re-initializes the superblock quantizers for one trial point of the sb
// qp sweep: applies delta_qp_ofs on top of the previously chosen SB qindex
// and resets the per-SB search state.
// NOTE(review): the delta-lf section and state reset duplicate the tails of
// setup_delta_q() / init_encode_rd_sb(); a shared helper would keep them in
// sync.
static void sb_qp_sweep_init_quantizers(AV1_COMP *cpi, ThreadData *td,
                                        const TileDataEnc *tile_data,
                                        SIMPLE_MOTION_DATA_TREE *sms_tree,
                                        RD_STATS *rd_cost, int mi_row,
                                        int mi_col, int delta_qp_ofs) {
  AV1_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
  const TileInfo *tile_info = &tile_data->tile_info;
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
  const DeltaQInfo *const delta_q_info = &cm->delta_q_info;
  assert(delta_q_info->delta_q_present_flag);
  const int delta_q_res = delta_q_info->delta_q_res;

  const SPEED_FEATURES *sf = &cpi->sf;
  const int use_simple_motion_search =
      (sf->part_sf.simple_motion_search_split ||
       sf->part_sf.simple_motion_search_prune_rect ||
       sf->part_sf.simple_motion_search_early_term_none ||
       sf->part_sf.ml_early_term_after_part_split_level) &&
      !frame_is_intra_only(cm);
  if (use_simple_motion_search) {
    av1_init_simple_motion_search_mvs_for_sb(cpi, tile_info, x, sms_tree,
                                             mi_row, mi_col);
  }

  // Apply the sweep offset, then snap to the delta-q resolution.
  int current_qindex = x->rdmult_cur_qindex + delta_qp_ofs;

  MACROBLOCKD *const xd = &x->e_mbd;
  current_qindex = av1_adjust_q_from_delta_q_res(
      delta_q_res, xd->current_base_qindex, current_qindex);

  x->delta_qindex = current_qindex - cm->quant_params.base_qindex;

  av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
  xd->mi[0]->current_qindex = current_qindex;
  av1_init_plane_quantizers(cpi, x, xd->mi[0]->segment_id, 0);

  // keep track of any non-zero delta-q used
  td->deltaq_used |= (x->delta_qindex != 0);

  if (cpi->oxcf.tool_cfg.enable_deltalf_mode) {
    // Derive a loop-filter delta from the q delta and clamp to legal range.
    const int delta_lf_res = delta_q_info->delta_lf_res;
    const int lfmask = ~(delta_lf_res - 1);
    const int delta_lf_from_base =
        ((x->delta_qindex / 4 + delta_lf_res / 2) & lfmask);
    const int8_t delta_lf =
        (int8_t)clamp(delta_lf_from_base, -MAX_LOOP_FILTER, MAX_LOOP_FILTER);
    const int frame_lf_count =
        av1_num_planes(cm) > 1 ? FRAME_LF_COUNT : FRAME_LF_COUNT - 2;
    const int mib_size = cm->seq_params->mib_size;

    // pre-set the delta lf for loop filter. Note that this value is set
    // before mi is assigned for each block in current superblock
    for (int j = 0; j < AOMMIN(mib_size, mi_params->mi_rows - mi_row); j++) {
      for (int k = 0; k < AOMMIN(mib_size, mi_params->mi_cols - mi_col); k++) {
        const int grid_idx = get_mi_grid_idx(mi_params, mi_row + j, mi_col + k);
        mi_params->mi_alloc[grid_idx].delta_lf_from_base = delta_lf;
        for (int lf_id = 0; lf_id < frame_lf_count; ++lf_id) {
          mi_params->mi_alloc[grid_idx].delta_lf[lf_id] = delta_lf;
        }
      }
    }
  }

  // Reset per-superblock search state for this trial.
  x->reuse_inter_pred = false;
  x->txfm_search_params.mode_eval_type = DEFAULT_EVAL;
  reset_mb_rd_record(x->txfm_search_info.mb_rd_record);
  av1_zero(x->picked_ref_frames_mask);
  av1_invalid_rd_stats(rd_cost);
}
// Sweeps a range of qp offsets around the superblock's current delta-q,
// dry-encoding the SB at each offset, and returns the delta qindex with the
// best (lowest) rd cost. Ties are broken toward the smaller absolute offset.
static int sb_qp_sweep(AV1_COMP *const cpi, ThreadData *td,
                       TileDataEnc *tile_data, TokenExtra **tp, int mi_row,
                       int mi_col, BLOCK_SIZE bsize,
                       SIMPLE_MOTION_DATA_TREE *sms_tree,
                       SB_FIRST_PASS_STATS *sb_org_stats) {
  AV1_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  RD_STATS rdc_winner, cur_rdc;
  av1_invalid_rd_stats(&rdc_winner);

  int best_qindex = td->mb.rdmult_delta_qindex;
  // Key frames get a wider sweep range than inter frames.
  const int start = cm->current_frame.frame_type == KEY_FRAME ? -20 : -12;
  const int end = cm->current_frame.frame_type == KEY_FRAME ? 20 : 12;
  const int step = cm->delta_q_info.delta_q_res;

  for (int sweep_qp_delta = start; sweep_qp_delta <= end;
       sweep_qp_delta += step) {
    sb_qp_sweep_init_quantizers(cpi, td, tile_data, sms_tree, &cur_rdc, mi_row,
                                mi_col, sweep_qp_delta);

    // Restore the pristine SB state for each trial, preserving only the
    // qindex written by the quantizer init above.
    const int alloc_mi_idx = get_alloc_mi_idx(&cm->mi_params, mi_row, mi_col);
    const int backup_current_qindex =
        cm->mi_params.mi_alloc[alloc_mi_idx].current_qindex;

    av1_reset_mbmi(&cm->mi_params, bsize, mi_row, mi_col);
    av1_restore_sb_state(sb_org_stats, cpi, td, tile_data, mi_row, mi_col);
    cm->mi_params.mi_alloc[alloc_mi_idx].current_qindex = backup_current_qindex;

    td->pc_root = av1_alloc_pc_tree_node(bsize);
    if (!td->pc_root)
      aom_internal_error(x->e_mbd.error_info, AOM_CODEC_MEM_ERROR,
                         "Failed to allocate PC_TREE");
    // SB_DRY_PASS: search the partition to measure cost only.
    av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
                          &cur_rdc, cur_rdc, td->pc_root, sms_tree, NULL,
                          SB_DRY_PASS, NULL);

    if ((rdc_winner.rdcost > cur_rdc.rdcost) ||
        (abs(sweep_qp_delta) < abs(best_qindex - x->rdmult_delta_qindex) &&
         rdc_winner.rdcost == cur_rdc.rdcost)) {
      rdc_winner = cur_rdc;
      best_qindex = x->rdmult_delta_qindex + sweep_qp_delta;
    }
  }

  return best_qindex;
}
#endif  // !CONFIG_REALTIME_ONLY
/*!\brief Encode a superblock (RD-search-based)
*
* \ingroup partition_search
* Conducts partition search for a superblock, based on rate-distortion costs,
* from scratch or adjusting from a pre-calculated partition pattern.
*/
static inline void encode_rd_sb(AV1_COMP *cpi, ThreadData *td,
TileDataEnc *tile_data, TokenExtra **tp,
const int mi_row, const int mi_col,
const int seg_skip) {
AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const SPEED_FEATURES *const sf = &cpi->sf;
const TileInfo *const tile_info = &tile_data->tile_info;
MB_MODE_INFO **mi = cm->mi_params.mi_grid_base +
get_mi_grid_idx(&cm->mi_params, mi_row, mi_col);
const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
const int num_planes = av1_num_planes(cm);
int dummy_rate;
int64_t dummy_dist;
RD_STATS dummy_rdc;
SIMPLE_MOTION_DATA_TREE *const sms_root = td->sms_root;
#if CONFIG_REALTIME_ONLY
(void )seg_skip;
#endif // CONFIG_REALTIME_ONLY
init_encode_rd_sb(cpi, td, tile_data, sms_root, &dummy_rdc, mi_row, mi_col,
1);
// Encode the superblock
if (sf->part_sf.partition_search_type == VAR_BASED_PARTITION) {
// partition search starting from a variance-based partition
av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
av1_choose_var_based_partitioning(cpi, tile_info, td, x, mi_row, mi_col);
#if CONFIG_COLLECT_COMPONENT_TIMING
start_timing(cpi, rd_use_partition_time);
#endif
td->pc_root = av1_alloc_pc_tree_node(sb_size);
if (!td->pc_root)
aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate PC_TREE" );
av1_rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, sb_size,
&dummy_rate, &dummy_dist, 1, td->pc_root);
av1_free_pc_tree_recursive(td->pc_root, num_planes, 0, 0,
sf->part_sf.partition_search_type);
td->pc_root = NULL;
#if CONFIG_COLLECT_COMPONENT_TIMING
end_timing(cpi, rd_use_partition_time);
#endif
}
#if !CONFIG_REALTIME_ONLY
else if (sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip) {
// partition search by adjusting a fixed-size partition
av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
const BLOCK_SIZE bsize =
seg_skip ? sb_size : sf->part_sf.fixed_partition_size;
av1_set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
td->pc_root = av1_alloc_pc_tree_node(sb_size);
if (!td->pc_root)
aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate PC_TREE" );
av1_rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, sb_size,
&dummy_rate, &dummy_dist, 1, td->pc_root);
av1_free_pc_tree_recursive(td->pc_root, num_planes, 0, 0,
sf->part_sf.partition_search_type);
td->pc_root = NULL;
} else {
// The most exhaustive recursive partition search
SuperBlockEnc *sb_enc = &x->sb_enc;
// No stats for overlay frames. Exclude key frame.
av1_get_tpl_stats_sb(cpi, sb_size, mi_row, mi_col, sb_enc);
// Reset the tree for simple motion search data
av1_reset_simple_motion_tree_partition(sms_root, sb_size);
#if CONFIG_COLLECT_COMPONENT_TIMING
start_timing(cpi, rd_pick_partition_time);
#endif
// Estimate the maximum square partition block size, which will be used
// as the starting block size for partitioning the sb
set_max_min_partition_size(sb_enc, cpi, x, sf, sb_size, mi_row, mi_col);
// The superblock can be searched only once, or twice consecutively for
// better quality. Note that the meaning of passes here is different from
// the general concept of 1-pass/2-pass encoders.
const int num_passes =
cpi->oxcf.unit_test_cfg.sb_multipass_unit_test ? 2 : 1;
if (cpi->oxcf.sb_qp_sweep &&
!(has_no_stats_stage(cpi) && cpi->oxcf.mode == REALTIME &&
cpi->oxcf.gf_cfg.lag_in_frames == 0) &&
cm->delta_q_info.delta_q_present_flag) {
AOM_CHECK_MEM_ERROR(
x->e_mbd.error_info, td->mb.sb_stats_cache,
(SB_FIRST_PASS_STATS *)aom_malloc(sizeof (*td->mb.sb_stats_cache)));
av1_backup_sb_state(td->mb.sb_stats_cache, cpi, td, tile_data, mi_row,
mi_col);
assert(x->rdmult_delta_qindex == x->delta_qindex);
const int best_qp_diff =
sb_qp_sweep(cpi, td, tile_data, tp, mi_row, mi_col, sb_size, sms_root,
td->mb.sb_stats_cache) -
x->rdmult_delta_qindex;
sb_qp_sweep_init_quantizers(cpi, td, tile_data, sms_root, &dummy_rdc,
mi_row, mi_col, best_qp_diff);
const int alloc_mi_idx = get_alloc_mi_idx(&cm->mi_params, mi_row, mi_col);
const int backup_current_qindex =
cm->mi_params.mi_alloc[alloc_mi_idx].current_qindex;
av1_reset_mbmi(&cm->mi_params, sb_size, mi_row, mi_col);
av1_restore_sb_state(td->mb.sb_stats_cache, cpi, td, tile_data, mi_row,
mi_col);
cm->mi_params.mi_alloc[alloc_mi_idx].current_qindex =
backup_current_qindex;
aom_free(td->mb.sb_stats_cache);
td->mb.sb_stats_cache = NULL;
}
if (num_passes == 1) {
#if CONFIG_PARTITION_SEARCH_ORDER
if (cpi->ext_part_controller.ready && !frame_is_intra_only(cm)) {
av1_reset_part_sf(&cpi->sf.part_sf);
av1_reset_sf_for_ext_part(cpi);
RD_STATS this_rdc;
av1_rd_partition_search(cpi, td, tile_data, tp, sms_root, mi_row,
mi_col, sb_size, &this_rdc);
} else {
td->pc_root = av1_alloc_pc_tree_node(sb_size);
if (!td->pc_root)
aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate PC_TREE" );
av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
&dummy_rdc, dummy_rdc, td->pc_root, sms_root,
NULL, SB_SINGLE_PASS, NULL);
}
#else
td->pc_root = av1_alloc_pc_tree_node(sb_size);
if (!td->pc_root)
aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate PC_TREE" );
av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
&dummy_rdc, dummy_rdc, td->pc_root, sms_root, NULL,
SB_SINGLE_PASS, NULL);
#endif // CONFIG_PARTITION_SEARCH_ORDER
} else {
// First pass
AOM_CHECK_MEM_ERROR(
x->e_mbd.error_info, td->mb.sb_fp_stats,
(SB_FIRST_PASS_STATS *)aom_malloc(sizeof (*td->mb.sb_fp_stats)));
av1_backup_sb_state(td->mb.sb_fp_stats, cpi, td, tile_data, mi_row,
mi_col);
td->pc_root = av1_alloc_pc_tree_node(sb_size);
if (!td->pc_root)
aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate PC_TREE" );
av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
&dummy_rdc, dummy_rdc, td->pc_root, sms_root, NULL,
SB_DRY_PASS, NULL);
// Second pass
init_encode_rd_sb(cpi, td, tile_data, sms_root, &dummy_rdc, mi_row,
mi_col, 0);
av1_reset_mbmi(&cm->mi_params, sb_size, mi_row, mi_col);
av1_reset_simple_motion_tree_partition(sms_root, sb_size);
av1_restore_sb_state(td->mb.sb_fp_stats, cpi, td, tile_data, mi_row,
mi_col);
td->pc_root = av1_alloc_pc_tree_node(sb_size);
if (!td->pc_root)
aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate PC_TREE" );
av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
&dummy_rdc, dummy_rdc, td->pc_root, sms_root, NULL,
SB_WET_PASS, NULL);
aom_free(td->mb.sb_fp_stats);
td->mb.sb_fp_stats = NULL;
}
// Reset to 0 so that it wouldn't be used elsewhere mistakenly.
sb_enc->tpl_data_count = 0;
#if CONFIG_COLLECT_COMPONENT_TIMING
end_timing(cpi, rd_pick_partition_time);
#endif
}
#endif // !CONFIG_REALTIME_ONLY
// Update the inter rd model
// TODO(angiebird): Let inter_mode_rd_model_estimation support multi-tile.
if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1 &&
cm->tiles.cols == 1 && cm->tiles.rows == 1) {
av1_inter_mode_data_fit(tile_data, x->rdmult);
}
}
// Returns 1 iff the cost-update levels for mode, coefficient and dv symbols
// are all restricted to tile granularity or turned off entirely, i.e. none of
// them requires per-superblock updates.
static inline int is_mode_coeff_dv_upd_freq_tile_or_off(
    const AV1_COMP *const cpi) {
  const INTER_MODE_SPEED_FEATURES *const inter_sf = &cpi->sf.inter_sf;
  const int coeff_upd_tile_or_off =
      inter_sf->coeff_cost_upd_level <= INTERNAL_COST_UPD_TILE;
  const int mode_upd_tile_or_off =
      inter_sf->mode_cost_upd_level <= INTERNAL_COST_UPD_TILE;
  const int dv_upd_tile_or_off =
      cpi->sf.intra_sf.dv_cost_upd_level <= INTERNAL_COST_UPD_TILE;
  return coeff_upd_tile_or_off && mode_upd_tile_or_off && dv_upd_tile_or_off;
}
// When row-mt is enabled and cost update frequencies are set to off/tile,
// processing of the current SB can start even before processing of the
// top-right SB is finished. Returns 1 when waiting only for the top SB (not
// the top-right SB) is sufficient before the current SB starts processing.
static inline int delay_wait_for_top_right_sb(const AV1_COMP *const cpi) {
  switch (cpi->oxcf.mode) {
    case ALLINTRA: return is_mode_coeff_dv_upd_freq_tile_or_off(cpi);
    case REALTIME:
      // Realtime additionally requires the mv cost update to be tile/off.
      return is_mode_coeff_dv_upd_freq_tile_or_off(cpi) &&
             cpi->sf.inter_sf.mv_cost_upd_level <= INTERNAL_COST_UPD_TILE;
    case GOOD:
    default: return 0;
  }
}
/*!\brief Calculate source SAD at superblock level using 64x64 block source SAD
 *
 * \ingroup partition_search
 * \callgraph
 * \callergraph
 */
static inline uint64_t get_sb_source_sad(const AV1_COMP *cpi, int mi_row,
                                         int mi_col) {
  if (cpi->src_sad_blk_64x64 == NULL) return UINT64_MAX;
  const AV1_COMMON *const cm = &cpi->common;
  // Width/height of one 64x64 source-SAD block, in mode-info units.
  const int mis_per_blk64 = (cm->seq_params->sb_size == BLOCK_128X128)
                                ? (cm->seq_params->mib_size >> 1)
                                : cm->seq_params->mib_size;
  // Dimensions of the 64x64-block grid covering the frame (rounded up).
  const int cols_blk64 =
      (cm->mi_params.mi_cols + mis_per_blk64 - 1) / mis_per_blk64;
  const int rows_blk64 =
      (cm->mi_params.mi_rows + mis_per_blk64 - 1) / mis_per_blk64;
  const int col_blk64 = mi_col / mis_per_blk64;
  const int row_blk64 = mi_row / mis_per_blk64;
  // Avoid the border as sad_blk_64x64 may not be set for the border
  // in the scene detection.
  if (row_blk64 >= rows_blk64 - 1 || col_blk64 >= cols_blk64 - 1)
    return UINT64_MAX;
  const uint64_t *const sad =
      &cpi->src_sad_blk_64x64[col_blk64 + row_blk64 * cols_blk64];
  if (cm->seq_params->sb_size == BLOCK_128X128) {
    // A 128x128 superblock covers a 2x2 group of 64x64 SAD entries:
    // accumulate all four to get the SB source SAD.
    return sad[0] + sad[1] + sad[cols_blk64] + sad[cols_blk64 + 1];
  }
  if (cm->seq_params->sb_size == BLOCK_64X64) return sad[0];
  return UINT64_MAX;
}
/*!\brief Determine whether grading content can be skipped based on sad stat
 *
 * \ingroup partition_search
 * \callgraph
 * \callergraph
 */
static inline bool is_calc_src_content_needed(AV1_COMP *cpi,
                                              MACROBLOCK *const x, int mi_row,
                                              int mi_col) {
  // Lower spatial layers always grade their content.
  if (cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1)
    return true;
  const uint64_t sb_sad = get_sb_source_sad(cpi, mi_row, mi_col);
  // UINT64_MAX means no usable SB source SAD: fall back to full grading.
  if (sb_sad == UINT64_MAX) return true;
  if (sb_sad == 0) {
    x->content_state_sb.source_sad_nonrd = kZeroSad;
    return false;
  }
  AV1_COMMON *const cm = &cpi->common;
  if (cpi->oxcf.speed < 9) return true;
  // TODO(yunqing): Tune/validate the thresholds for 128x128 SB size.
  if (AOMMIN(cm->width, cm->height) >= 360) return true;
  // Derive average 64x64 block source SAD from the SB source SAD.
  const uint64_t avg_blk64_sad = (cm->seq_params->sb_size == BLOCK_128X128)
                                     ? ((sb_sad + 2) >> 2)
                                     : sb_sad;
  // The threshold is determined based on kLowSad and kHighSad threshold and
  // test results.
  const int doubled = cpi->sf.rt_sf.increase_source_sad_thresh;
  const uint64_t thresh_low = doubled ? 30000 : 15000;
  const uint64_t thresh_high = doubled ? 80000 : 40000;
  if (avg_blk64_sad > thresh_low && avg_blk64_sad < thresh_high) {
    // Note: set x->content_state_sb.source_sad_rd as well if this is extended
    // to RTC rd path.
    x->content_state_sb.source_sad_nonrd = kMedSad;
    return false;
  }
  return true;
}
/*!\brief Determine whether grading content is needed based on sf and frame stat
 *
 * \ingroup partition_search
 * \callgraph
 * \callergraph
 */
// TODO(any): consolidate sfs to make interface cleaner
static inline void grade_source_content_sb(AV1_COMP *cpi, MACROBLOCK *const x,
                                           TileDataEnc *tile_data, int mi_row,
                                           int mi_col) {
  AV1_COMMON *const cm = &cpi->common;
  const bool is_key_like_frame =
      cm->current_frame.frame_type == KEY_FRAME ||
      (cpi->ppi->use_svc &&
       cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame);
  if (is_key_like_frame) {
    // Key frames keep the default (kMedSad) grades; nothing to compute.
    assert(x->content_state_sb.source_sad_nonrd == kMedSad);
    assert(x->content_state_sb.source_sad_rd == kMedSad);
    return;
  }
  bool calc_src_content = false;
  if (cpi->sf.rt_sf.source_metrics_sb_nonrd) {
    if (cpi->sf.rt_sf.check_scene_detection &&
        !(cpi->rc.frame_source_sad > 0)) {
      // Scene detection saw no frame-level SAD: mark the SB as zero-SAD.
      x->content_state_sb.source_sad_nonrd = kZeroSad;
    } else {
      calc_src_content = is_calc_src_content_needed(cpi, x, mi_row, mi_col);
    }
  } else if (cpi->sf.rt_sf.var_part_based_on_qidx >= 1 &&
             cm->width * cm->height <= 352 * 288) {
    if (cpi->rc.frame_source_sad > 0)
      calc_src_content = true;
    else
      x->content_state_sb.source_sad_rd = kZeroSad;
  }
  if (calc_src_content)
    av1_source_content_sb(cpi, x, tile_data, mi_row, mi_col);
}
/*!\brief Encode a superblock row by breaking it into superblocks
 *
 * \ingroup partition_search
 * \callgraph
 * \callergraph
 * Do partition and mode search for an sb row: one row of superblocks filling up
 * the width of the current tile.
 */
static inline void encode_sb_row(AV1_COMP *cpi, ThreadData *td,
                                 TileDataEnc *tile_data, int mi_row,
                                 TokenExtra **tp) {
  AV1_COMMON *const cm = &cpi->common;
  const TileInfo *const tile_info = &tile_data->tile_info;
  MultiThreadInfo *const mt_info = &cpi->mt_info;
  AV1EncRowMultiThreadInfo *const enc_row_mt = &mt_info->enc_row_mt;
  AV1EncRowMultiThreadSync *const row_mt_sync = &tile_data->row_mt_sync;
  bool row_mt_enabled = mt_info->row_mt_enabled;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int sb_cols_in_tile = av1_get_sb_cols_in_tile(cm, tile_info);
  const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
  const int mib_size = cm->seq_params->mib_size;
  const int mib_size_log2 = cm->seq_params->mib_size_log2;
  // SB-row index of mi_row within this tile.
  const int sb_row = (mi_row - tile_info->mi_row_start) >> mib_size_log2;
  const int use_nonrd_mode = cpi->sf.rt_sf.use_nonrd_pick_mode;
#if CONFIG_COLLECT_COMPONENT_TIMING
  start_timing(cpi, encode_sb_row_time);
#endif
  // Initialize the left context for the new SB row
  av1_zero_left_context(xd);
  // Reset delta for quantizer and loop filters at the beginning of every tile.
  // When row-mt is enabled, reset on every SB row (rows may run on different
  // worker threads).
  if (mi_row == tile_info->mi_row_start || row_mt_enabled) {
    if (cm->delta_q_info.delta_q_present_flag)
      xd->current_base_qindex = cm->quant_params.base_qindex;
    if (cm->delta_q_info.delta_lf_present_flag) {
      av1_reset_loop_filter_delta(xd, av1_num_planes(cm));
    }
  }
  reset_thresh_freq_fact(x);
  // Code each SB in the row
  for (int mi_col = tile_info->mi_col_start, sb_col_in_tile = 0;
       mi_col < tile_info->mi_col_end; mi_col += mib_size, sb_col_in_tile++) {
    // In realtime/allintra mode and when frequency of cost updates is off/tile,
    // wait for the top superblock to finish encoding. Otherwise, wait for the
    // top-right superblock to finish encoding.
    enc_row_mt->sync_read_ptr(
        row_mt_sync, sb_row, sb_col_in_tile - delay_wait_for_top_right_sb(cpi));
#if CONFIG_MULTITHREAD
    if (row_mt_enabled) {
      // Read the shared exit flag under the row-mt mutex.
      pthread_mutex_lock(enc_row_mt->mutex_);
      const bool row_mt_exit = enc_row_mt->row_mt_exit;
      pthread_mutex_unlock(enc_row_mt->mutex_);
      // Exit in case any worker has encountered an error.
      if (row_mt_exit) return;
    }
#endif
    const int update_cdf = tile_data->allow_update_cdf && row_mt_enabled;
    if (update_cdf && (tile_info->mi_row_start != mi_row)) {
      if ((tile_info->mi_col_start == mi_col)) {
        // restore frame context at the 1st column sb
        memcpy(xd->tile_ctx, x->row_ctx, sizeof(*xd->tile_ctx));
      } else {
        // update context
        int wt_left = AVG_CDF_WEIGHT_LEFT;
        int wt_tr = AVG_CDF_WEIGHT_TOP_RIGHT;
        // Average in the top-right row context when one exists; otherwise
        // fall back to the entry one position to the left.
        if (tile_info->mi_col_end > (mi_col + mib_size))
          av1_avg_cdf_symbols(xd->tile_ctx, x->row_ctx + sb_col_in_tile,
                              wt_left, wt_tr);
        else
          av1_avg_cdf_symbols(xd->tile_ctx, x->row_ctx + sb_col_in_tile - 1,
                              wt_left, wt_tr);
      }
    }
    // Update the rate cost tables for some symbols
    av1_set_cost_upd_freq(cpi, td, tile_info, mi_row, mi_col);
    // Reset color coding related parameters
    av1_zero(x->color_sensitivity_sb);
    av1_zero(x->color_sensitivity_sb_g);
    av1_zero(x->color_sensitivity_sb_alt);
    av1_zero(x->color_sensitivity);
    // Reset per-SB content state and fast-search shortcuts to their defaults
    // before this SB is graded/encoded.
    x->content_state_sb.source_sad_nonrd = kMedSad;
    x->content_state_sb.source_sad_rd = kMedSad;
    x->content_state_sb.lighting_change = 0;
    x->content_state_sb.low_sumdiff = 0;
    x->force_zeromv_skip_for_sb = 0;
    x->sb_me_block = 0;
    x->sb_me_partition = 0;
    x->sb_me_mv.as_int = 0;
    x->sb_force_fixed_part = 1;
    x->color_palette_thresh = 64;
    x->force_color_check_block_level = 0;
    x->nonrd_prune_ref_frame_search =
        cpi->sf.rt_sf.nonrd_prune_ref_frame_search;
    if (cpi->oxcf.mode == ALLINTRA) {
      // 128 appears to be the neutral value of this fixed-point rdmult
      // modifier -- TODO(review): confirm the scale.
      x->intra_sb_rdmult_modifier = 128;
    }
    xd->cur_frame_force_integer_mv = cm->features.cur_frame_force_integer_mv;
    x->source_variance = UINT_MAX;
    td->mb.cb_coef_buff = av1_get_cb_coeff_buffer(cpi, mi_row, mi_col);
    // Get segment id and skip flag
    const struct segmentation *const seg = &cm->seg;
    int seg_skip = 0;
    if (seg->enabled) {
      const uint8_t *const map =
          seg->update_map ? cpi->enc_seg.map : cm->last_frame_seg_map;
      const uint8_t segment_id =
          map ? get_segment_id(&cm->mi_params, map, sb_size, mi_row, mi_col)
              : 0;
      seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
    }
    produce_gradients_for_sb(cpi, x, sb_size, mi_row, mi_col);
    init_src_var_info_of_4x4_sub_blocks(cpi, x->src_var_info_of_4x4_sub_blocks,
                                        sb_size);
    // Grade the temporal variation of the sb, the grade will be used to decide
    // fast mode search strategy for coding blocks
    if (!seg_skip) grade_source_content_sb(cpi, x, tile_data, mi_row, mi_col);
    // encode the superblock
    if (use_nonrd_mode) {
      encode_nonrd_sb(cpi, td, tile_data, tp, mi_row, mi_col, seg_skip);
    } else {
      encode_rd_sb(cpi, td, tile_data, tp, mi_row, mi_col, seg_skip);
    }
    // Update the top-right context in row_mt coding
    if (update_cdf && (tile_info->mi_row_end > (mi_row + mib_size))) {
      if (sb_cols_in_tile == 1)
        memcpy(x->row_ctx, xd->tile_ctx, sizeof(*xd->tile_ctx));
      else if (sb_col_in_tile >= 1)
        memcpy(x->row_ctx + sb_col_in_tile - 1, xd->tile_ctx,
               sizeof(*xd->tile_ctx));
    }
    // Mark this SB column as complete so dependent SB rows may proceed.
    enc_row_mt->sync_write_ptr(row_mt_sync, sb_row, sb_col_in_tile,
                               sb_cols_in_tile);
  }
#if CONFIG_COLLECT_COMPONENT_TIMING
  end_timing(cpi, encode_sb_row_time);
#endif
}
// Point the encoder macroblock at the frame's source planes and configure the
// per-plane subsampling before frame encoding starts.
static inline void init_encode_frame_mb_context(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int num_planes = av1_num_planes(cm);
  // Copy data over into macro block data structures.
  av1_setup_src_planes(x, cpi->source, 0, 0, num_planes,
                       cm->seq_params->sb_size);
  av1_setup_block_planes(xd, cm->seq_params->subsampling_x,
                         cm->seq_params->subsampling_y, num_planes);
}
// (Re)allocate the per-tile encoder data array (cpi->tile_data) for the
// current tile configuration, releasing any previous allocation first, and
// reset each tile's row-mt sync state.
void av1_alloc_tile_data(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  AV1EncRowMultiThreadInfo *const enc_row_mt = &cpi->mt_info.enc_row_mt;
  const int tile_cols = cm->tiles.cols;
  const int tile_rows = cm->tiles.rows;

  // Release row-mt memory tied to the old tile array before freeing it.
  av1_row_mt_mem_dealloc(cpi);
  aom_free(cpi->tile_data);
  // Clear the stale pointer and counters immediately: if the allocation below
  // fails, CHECK_MEM_ERROR leaves this function via the error handler, and
  // teardown code must not see a dangling cpi->tile_data (double-free risk).
  cpi->tile_data = NULL;
  cpi->allocated_tiles = 0;
  enc_row_mt->allocated_tile_cols = 0;
  enc_row_mt->allocated_tile_rows = 0;

  CHECK_MEM_ERROR(
      cm, cpi->tile_data,
      aom_memalign(32, tile_cols * tile_rows * sizeof(*cpi->tile_data)));
  cpi->allocated_tiles = tile_cols * tile_rows;
  enc_row_mt->allocated_tile_cols = tile_cols;
  enc_row_mt->allocated_tile_rows = tile_rows;
  for (int tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (int tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const int tile_index = tile_row * tile_cols + tile_col;
      TileDataEnc *const this_tile = &cpi->tile_data[tile_index];
      av1_zero(this_tile->row_mt_sync);
      this_tile->row_ctx = NULL;
    }
  }
}
// Initialize per-tile encoder data for the frame: tile geometry, entropy
// context, CDF-update permission, and each tile's slice of the frame-level
// token storage.
void av1_init_tile_data(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  const int tile_cols = cm->tiles.cols;
  const int tile_rows = cm->tiles.rows;
  int tile_col, tile_row;
  TokenInfo *const token_info = &cpi->token_info;
  // Running cursors into the frame-level token / token-list storage; each
  // tile below is handed the next contiguous slice.
  TokenExtra *pre_tok = token_info->tile_tok[0][0];
  TokenList *tplist = token_info->tplist[0][0];
  unsigned int tile_tok = 0;
  int tplist_count = 0;
  if (!is_stat_generation_stage(cpi) &&
      cm->features.allow_screen_content_tools) {
    // Number of tokens for which token info needs to be allocated.
    unsigned int tokens_required =
        get_token_alloc(cm->mi_params.mb_rows, cm->mi_params.mb_cols,
                        MAX_SB_SIZE_LOG2, num_planes);
    // Allocate/reallocate memory for token related info if the number of tokens
    // required is more than the number of tokens already allocated. This could
    // occur in case of the following:
    // 1) If the memory is not yet allocated
    // 2) If the frame dimensions have changed
    const bool realloc_tokens = tokens_required > token_info->tokens_allocated;
    if (realloc_tokens) {
      free_token_info(token_info);
      alloc_token_info(cm, token_info, tokens_required);
      // Refresh the cursors: the storage may have moved after reallocation.
      pre_tok = token_info->tile_tok[0][0];
      tplist = token_info->tplist[0][0];
    }
  }
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileDataEnc *const tile_data =
          &cpi->tile_data[tile_row * tile_cols + tile_col];
      TileInfo *const tile_info = &tile_data->tile_info;
      av1_tile_init(tile_info, cm, tile_row, tile_col);
      tile_data->firstpass_top_mv = kZeroMv;
      tile_data->abs_sum_level = 0;
      if (is_token_info_allocated(token_info)) {
        // Hand this tile the next slice of the token buffer, then advance the
        // cursors by the previous tile's allocation (tile_tok / tplist_count
        // hold the sizes computed for the previous iteration).
        token_info->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
        pre_tok = token_info->tile_tok[tile_row][tile_col];
        tile_tok = allocated_tokens(
            tile_info, cm->seq_params->mib_size_log2 + MI_SIZE_LOG2,
            num_planes);
        token_info->tplist[tile_row][tile_col] = tplist + tplist_count;
        tplist = token_info->tplist[tile_row][tile_col];
        tplist_count = av1_get_sb_rows_in_tile(cm, tile_info);
      }
      // CDF updates are off for large-scale tiles, when the frame disables
      // them, or when the row-mt top-right wait is relaxed (see
      // delay_wait_for_top_right_sb).
      tile_data->allow_update_cdf = !cm->tiles.large_scale;
      tile_data->allow_update_cdf = tile_data->allow_update_cdf &&
                                    !cm->features.disable_cdf_update &&
                                    !delay_wait_for_top_right_sb(cpi);
      // Seed the tile's entropy context from the frame context.
      tile_data->tctx = *cm->fc;
    }
  }
}
// Populate the start palette token info prior to encoding an SB row.
static inline void get_token_start(AV1_COMP *cpi, const TileInfo *tile_info,
                                   int tile_row, int tile_col, int mi_row,
                                   TokenExtra **tp) {
  const TokenInfo *token_info = &cpi->token_info;
  if (!is_token_info_allocated(token_info)) return;
  const AV1_COMMON *cm = &cpi->common;
  // Position *tp at the first token slot reserved for this SB row.
  get_start_tok(cpi, tile_row, tile_col, mi_row, tp,
                cm->seq_params->mib_size_log2 + MI_SIZE_LOG2,
                av1_num_planes(cm));
  TokenList *const tplist = token_info->tplist[tile_row][tile_col];
  assert(tplist != NULL);
  // Record the row's token start pointer in the tile's token list.
  const int sb_row =
      (mi_row - tile_info->mi_row_start) >> cm->seq_params->mib_size_log2;
  tplist[sb_row].start = *tp;
}
// Populate the token count after encoding an SB row.
static inline void populate_token_count(AV1_COMP *cpi,
                                        const TileInfo *tile_info, int tile_row,
                                        int tile_col, int mi_row,
                                        TokenExtra *tok) {
  const TokenInfo *token_info = &cpi->token_info;
  if (!is_token_info_allocated(token_info)) return;
  const AV1_COMMON *cm = &cpi->common;
  TokenList *const tplist = token_info->tplist[tile_row][tile_col];
  const int sb_row =
      (mi_row - tile_info->mi_row_start) >> cm->seq_params->mib_size_log2;
  // Tokens emitted for this SB row = end cursor minus recorded start.
  const unsigned int count = (unsigned int)(tok - tplist[sb_row].start);
  tplist[sb_row].count = count;
  // Sanity check: the emitted count must fit the per-row token allocation.
  const int num_planes = av1_num_planes(cm);
  const int tile_mb_cols =
      (tile_info->mi_col_end - tile_info->mi_col_start + 2) >> 2;
  const int num_mb_rows_in_sb =
      ((1 << (cm->seq_params->mib_size_log2 + MI_SIZE_LOG2)) + 8) >> 4;
  assert(count <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols,
                                  cm->seq_params->mib_size_log2 + MI_SIZE_LOG2,
                                  num_planes));
  (void)num_planes;
  (void)tile_mb_cols;
  (void)num_mb_rows_in_sb;
}
/*!\brief Encode a superblock row
 *
 * \ingroup partition_search
 */
void av1_encode_sb_row(AV1_COMP *cpi, ThreadData *td, int tile_row,
                       int tile_col, int mi_row) {
  AV1_COMMON *const cm = &cpi->common;
  TileDataEnc *const this_tile =
      &cpi->tile_data[tile_row * cm->tiles.cols + tile_col];
  const TileInfo *const tile_info = &this_tile->tile_info;
  TokenExtra *tok = NULL;
  // Record where this SB row's tokens begin, encode the row, then store the
  // number of tokens actually produced.
  get_token_start(cpi, tile_info, tile_row, tile_col, mi_row, &tok);
  encode_sb_row(cpi, td, this_tile, mi_row, &tok);
  populate_token_count(cpi, tile_info, tile_row, tile_col, mi_row, tok);
}
/*!\brief Encode a tile
 *
 * \ingroup partition_search
 */
void av1_encode_tile(AV1_COMP *cpi, ThreadData *td, int tile_row,
                     int tile_col) {
  AV1_COMMON *const cm = &cpi->common;
  TileDataEnc *const this_tile =
      &cpi->tile_data[tile_row * cm->tiles.cols + tile_col];
  const TileInfo *const tile_info = &this_tile->tile_info;
  MACROBLOCKD *const xd = &td->mb.e_mbd;
  if (!cpi->sf.rt_sf.use_nonrd_pick_mode) av1_inter_mode_data_init(this_tile);
  // Reset the above-row contexts covering this tile before its SB rows run.
  av1_zero_above_context(cm, xd, tile_info->mi_col_start,
                         tile_info->mi_col_end, tile_row);
  av1_init_above_context(&cm->above_contexts, av1_num_planes(cm), tile_row, xd);
#if !CONFIG_REALTIME_ONLY
  if (cpi->oxcf.intra_mode_cfg.enable_cfl_intra)
    cfl_init(&xd->cfl, cm->seq_params);
#endif
  if (td->mb.txfm_search_info.mb_rd_record != NULL) {
    av1_crc32c_calculator_init(
        &td->mb.txfm_search_info.mb_rd_record->crc_calculator);
  }
  // Encode the tile one SB row at a time.
  for (int mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
       mi_row += cm->seq_params->mib_size) {
    av1_encode_sb_row(cpi, td, tile_row, tile_col, mi_row);
  }
  this_tile->abs_sum_level = td->abs_sum_level;
}
/*!\brief Break one frame into tiles and encode the tiles
 *
 * \ingroup partition_search
 *
 * \param[in]    cpi    Top-level encoder structure
 */
static inline void encode_tiles(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  const int tile_cols = cm->tiles.cols;
  const int tile_rows = cm->tiles.rows;
  MACROBLOCK *const mb = &cpi->td.mb;
  assert(IMPLIES(cpi->tile_data == NULL,
                 cpi->allocated_tiles < tile_cols * tile_rows));
  // (Re)allocate tile data when the tile configuration outgrew the current
  // allocation, then initialize all per-tile state.
  if (cpi->allocated_tiles < tile_cols * tile_rows) av1_alloc_tile_data(cpi);
  av1_init_tile_data(cpi);
  av1_alloc_mb_data(cpi, mb);
  for (int tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (int tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileDataEnc *const this_tile =
          &cpi->tile_data[tile_row * tile_cols + tile_col];
      ThreadData *const td = &cpi->td;
      // Reset per-tile accumulators before encoding the tile.
      td->intrabc_used = 0;
      td->deltaq_used = 0;
      td->abs_sum_level = 0;
      td->rd_counts.seg_tmp_pred_cost[0] = 0;
      td->rd_counts.seg_tmp_pred_cost[1] = 0;
      td->mb.e_mbd.tile_ctx = &this_tile->tctx;
      td->mb.tile_pb_ctx = &this_tile->tctx;
      av1_init_rtc_counters(&td->mb);
      td->mb.palette_pixels = 0;
      av1_encode_tile(cpi, td, tile_row, tile_col);
      if (!frame_is_intra_only(cm)) av1_accumulate_rtc_counters(cpi, &td->mb);
      // Fold the tile's results into frame-level state.
      cpi->palette_pixel_num += td->mb.palette_pixels;
      cpi->intrabc_used |= td->intrabc_used;
      cpi->deltaq_used |= td->deltaq_used;
    }
  }
  av1_dealloc_mb_data(mb, av1_num_planes(cm));
}
// Set the relative distance of a reference frame w.r.t. current frame
static inline void set_rel_frame_dist(
    const AV1_COMMON *const cm, RefFrameDistanceInfo *const ref_frame_dist_info,
    const int ref_frame_flags) {
  int min_past_dist = INT32_MAX;
  int min_future_dist = INT32_MAX;
  ref_frame_dist_info->nearest_past_ref = NONE_FRAME;
  ref_frame_dist_info->nearest_future_ref = NONE_FRAME;
  for (MV_REFERENCE_FRAME ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME;
       ++ref_frame) {
    const int idx = ref_frame - LAST_FRAME;
    ref_frame_dist_info->ref_relative_dist[idx] = 0;
    if (!(ref_frame_flags & av1_ref_frame_flag_list[ref_frame])) continue;
    // Negative dist: the reference precedes the current frame in display
    // order; positive dist: it follows the current frame.
    const int dist = av1_encoder_get_relative_dist(
        cm->cur_frame->ref_display_order_hint[idx],
        cm->current_frame.display_order_hint);
    ref_frame_dist_info->ref_relative_dist[idx] = dist;
    // Get the nearest ref_frame in the past
    if (dist < 0 && -dist < min_past_dist) {
      ref_frame_dist_info->nearest_past_ref = ref_frame;
      min_past_dist = -dist;
    }
    // Get the nearest ref_frame in the future
    if (dist > 0 && dist < min_future_dist) {
      ref_frame_dist_info->nearest_future_ref = ref_frame;
      min_future_dist = dist;
    }
  }
}
// Returns 1 when every available reference frame precedes (or equals) the
// current frame in display order, i.e. there is no forward/bwd reference.
static inline int refs_are_one_sided(const AV1_COMMON *cm) {
  assert(!frame_is_intra_only(cm));
  const int cur_hint = cm->current_frame.display_order_hint;
  for (int ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref) {
    const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref);
    if (buf == NULL) continue;
    // A reference later than the current frame makes the set two-sided.
    if (av1_encoder_get_relative_dist(buf->display_order_hint, cur_hint) > 0)
      return 0;  // bwd reference
  }
  return 1;
}
static inline void get_skip_mode_ref_offsets(const AV1_COMMON *cm,
int ref_order_hint[2]) {
const SkipModeInfo *const skip_mode_info = &cm->current_frame.skip_mode_info;
ref_order_hint[0] = ref_order_hint[1] = 0;
if (!skip_mode_info->skip_mode_allowed) return ;
const RefCntBuffer *const buf_0 =
get_ref_frame_buf(cm, LAST_FRAME + skip_mode_info->ref_frame_idx_0);
const RefCntBuffer *const buf_1 =
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5 C=95 H=93 G=93
¤ Dauer der Verarbeitung: 0.20 Sekunden
(vorverarbeitet)
¤
*© Formatika GbR, Deutschland