/* * Copyright (c) 2016, Alliance for Open Media. All rights reserved. * * This source code is subject to the terms of the BSD 2 Clause License and * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License * was not distributed with this source code in the LICENSE file, you can * obtain it at www.aomedia.org/license/software. If the Alliance for Open * Media Patent License 1.0 was not distributed with this source code in the * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
// Row-based multi-threading synchronization write: publishes the number of
// finished columns for row 'r' and wakes the thread waiting on this row.
// 'c' is the just-encoded column, 'cols' the tile width in block columns.
void av1_row_mt_sync_write(AV1EncRowMultiThreadSync *row_mt_sync, int r, int c,
                           int cols) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;
  int cur;
  // Only signal when there are enough encoded blocks for next row to run.
  int sig = 1;

  if (c < cols - 1) {
    cur = c;
    if (c % nsync) sig = 0;
  } else {
    // Last column of the row: publish a value past the end of the tile so the
    // dependent row can run to completion (including the extra delay required
    // for the intraBC top-right reference area).
    cur = cols + nsync + row_mt_sync->intrabc_extra_top_right_sb_delay;
  }

  if (sig) {
    pthread_mutex_lock(&row_mt_sync->mutex_[r]);

    // When a thread encounters an error, num_finished_cols[r] is set to
    // maximum column number. In this case, the AOMMAX operation here ensures
    // that num_finished_cols[r] is not overwritten with a smaller value thus
    // preventing the infinite waiting of threads in the relevant sync_read()
    // function.
    row_mt_sync->num_finished_cols[r] =
        AOMMAX(row_mt_sync->num_finished_cols[r], cur);

    pthread_cond_signal(&row_mt_sync->cond_[r]);
    pthread_mutex_unlock(&row_mt_sync->mutex_[r]);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
  (void)cols;
#endif  // CONFIG_MULTITHREAD
}
// Deallocate row based multi-threading synchronization related mutex and data void av1_row_mt_sync_mem_dealloc(AV1EncRowMultiThreadSync *row_mt_sync) { if (row_mt_sync != NULL) { #if CONFIG_MULTITHREAD int i;
if (row_mt_sync->mutex_ != NULL) { for (i = 0; i < row_mt_sync->rows; ++i) {
pthread_mutex_destroy(&row_mt_sync->mutex_[i]);
}
aom_free(row_mt_sync->mutex_);
} if (row_mt_sync->cond_ != NULL) { for (i = 0; i < row_mt_sync->rows; ++i) {
pthread_cond_destroy(&row_mt_sync->cond_[i]);
}
aom_free(row_mt_sync->cond_);
} #endif// CONFIG_MULTITHREAD
aom_free(row_mt_sync->num_finished_cols);
// clear the structure as the source of this call may be dynamic change // in tiles in which case this call will be followed by an _alloc() // which may fail.
av1_zero(*row_mt_sync);
}
}
#if CONFIG_REALTIME_ONLY int num_b_rows_in_tile =
av1_get_sb_rows_in_tile(cm, &this_tile->tile_info); int num_b_cols_in_tile =
av1_get_sb_cols_in_tile(cm, &this_tile->tile_info); #else int num_b_rows_in_tile =
is_firstpass
? av1_get_unit_rows_in_tile(&this_tile->tile_info, fp_block_size)
: av1_get_sb_rows_in_tile(cm, &this_tile->tile_info); int num_b_cols_in_tile =
is_firstpass
? av1_get_unit_cols_in_tile(&this_tile->tile_info, fp_block_size)
: av1_get_sb_cols_in_tile(cm, &this_tile->tile_info); #endif int theoretical_limit_on_threads =
AOMMIN((num_b_cols_in_tile + 1) >> 1, num_b_rows_in_tile); int num_threads_working = row_mt_sync->num_threads_working;
if (num_threads_working < theoretical_limit_on_threads) { int num_mis_to_encode =
this_tile->tile_info.mi_row_end - row_mt_sync->next_mi_row;
// Tile to be processed by this thread is selected on the basis of // availability of jobs: // 1) If jobs are available, tile to be processed is chosen on the // basis of minimum number of threads working for that tile. If two or // more tiles have same number of threads working for them, then the // tile with maximum number of jobs available will be chosen. // 2) If no jobs are available, then end_of_frame is reached. if (num_mis_to_encode > 0) { if (num_threads_working < min_num_threads_working) {
min_num_threads_working = num_threads_working;
max_mis_to_encode = 0;
} if (num_threads_working == min_num_threads_working &&
num_mis_to_encode > max_mis_to_encode) {
tile_id = tile_index;
max_mis_to_encode = num_mis_to_encode;
}
}
}
}
} if (tile_id == -1) {
*end_of_frame = 1;
} else { // Update the current tile id to the tile id that will be processed next, // which will be the least processed tile.
*cur_tile_id = tile_id; constint unit_height = mi_size_high[fp_block_size];
get_next_job(&tile_data[tile_id], current_mi_row,
is_firstpass ? unit_height : cm->seq_params->mib_size);
}
}
// In case of multithreading of firstpass encode, due to top-right // dependency, the worker on a firstpass row waits for the completion of the // firstpass processing of the top and top-right fp_blocks. Hence, in case a // thread (main/worker) encounters an error, update the firstpass processing // of every row in the frame to indicate that it is complete in order to avoid // dependent workers waiting indefinitely. for (int tile_row = 0; tile_row < tile_rows; ++tile_row) { for (int tile_col = 0; tile_col < tile_cols; ++tile_col) {
TileDataEnc *const tile_data =
&cpi->tile_data[tile_row * tile_cols + tile_col];
TileInfo *tile = &tile_data->tile_info;
AV1EncRowMultiThreadSync *const row_mt_sync = &tile_data->row_mt_sync; constint unit_cols_in_tile =
av1_get_unit_cols_in_tile(tile, fp_block_size); for (int mi_row = tile->mi_row_start, unit_row_in_tile = 0;
mi_row < tile->mi_row_end;
mi_row += unit_height, unit_row_in_tile++) {
enc_row_mt->sync_write_ptr(row_mt_sync, unit_row_in_tile,
unit_cols_in_tile - 1, unit_cols_in_tile);
}
}
}
}
// The jmp_buf is valid only for the duration of the function that calls // setjmp(). Therefore, this function must reset the 'setjmp' field to 0 // before it returns. if (setjmp(error_info->jmp)) {
error_info->setjmp = 0; #if CONFIG_MULTITHREAD
pthread_mutex_lock(enc_row_mt_mutex_);
enc_row_mt->firstpass_mt_exit = true;
pthread_mutex_unlock(enc_row_mt_mutex_); #endif
set_firstpass_encode_done(cpi); return 0;
}
error_info->setjmp = 1;
AV1_COMMON *const cm = &cpi->common; int cur_tile_id = enc_row_mt->thread_id_to_tile_id[thread_id];
assert(cur_tile_id != -1);
const BLOCK_SIZE fp_block_size = cpi->fp_block_size; constint unit_height = mi_size_high[fp_block_size]; int end_of_frame = 0; while (1) { int current_mi_row = -1; #if CONFIG_MULTITHREAD
pthread_mutex_lock(enc_row_mt_mutex_); #endif bool firstpass_mt_exit = enc_row_mt->firstpass_mt_exit; if (!firstpass_mt_exit && !get_next_job(&cpi->tile_data[cur_tile_id],
¤t_mi_row, unit_height)) { // No jobs are available for the current tile. Query for the status of // other tiles and get the next job if available
switch_tile_and_get_next_job(cm, cpi->tile_data, &cur_tile_id,
¤t_mi_row, &end_of_frame, 1,
fp_block_size);
} #if CONFIG_MULTITHREAD
pthread_mutex_unlock(enc_row_mt_mutex_); #endif // When firstpass_mt_exit is set to true, other workers need not pursue any // further jobs. if (firstpass_mt_exit || end_of_frame) break;
// In case of row-multithreading, due to top-right dependency, the worker on // an SB row waits for the completion of the encode of the top and top-right // SBs. Hence, in case a thread (main/worker) encounters an error, update that // encoding of every SB row in the frame is complete in order to avoid the // dependent workers of every tile from waiting indefinitely. for (int tile_row = 0; tile_row < tile_rows; tile_row++) { for (int tile_col = 0; tile_col < tile_cols; tile_col++) {
TileDataEnc *const this_tile =
&cpi->tile_data[tile_row * tile_cols + tile_col]; const TileInfo *const tile_info = &this_tile->tile_info;
AV1EncRowMultiThreadSync *const row_mt_sync = &this_tile->row_mt_sync; constint sb_cols_in_tile = av1_get_sb_cols_in_tile(cm, tile_info); for (int mi_row = tile_info->mi_row_start, sb_row_in_tile = 0;
mi_row < tile_info->mi_row_end;
mi_row += mib_size, sb_row_in_tile++) {
enc_row_mt->sync_write_ptr(row_mt_sync, sb_row_in_tile,
sb_cols_in_tile - 1, sb_cols_in_tile);
}
}
}
}
// The jmp_buf is valid only for the duration of the function that calls // setjmp(). Therefore, this function must reset the 'setjmp' field to 0 // before it returns. if (setjmp(error_info->jmp)) {
error_info->setjmp = 0; #if CONFIG_MULTITHREAD
pthread_mutex_lock(enc_row_mt_mutex_);
enc_row_mt->row_mt_exit = true; // Wake up all the workers waiting in launch_loop_filter_rows() to exit in // case of an error.
pthread_cond_broadcast(enc_row_mt->cond_);
pthread_mutex_unlock(enc_row_mt_mutex_); #endif
set_encoding_done(cpi);
constint mib_size_log2 = cm->seq_params->mib_size_log2; int cur_tile_id = enc_row_mt->thread_id_to_tile_id[thread_id];
// Preallocate the pc_tree for realtime coding to reduce the cost of memory // allocation. if (cpi->sf.rt_sf.use_nonrd_pick_mode) {
thread_data->td->pc_root = av1_alloc_pc_tree_node(cm->seq_params->sb_size); if (!thread_data->td->pc_root)
aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR, "Failed to allocate PC_TREE");
} else {
thread_data->td->pc_root = NULL;
}
// When master thread does not have a valid job to process, xd->tile_ctx // is not set and it contains NULL pointer. This can result in NULL pointer // access violation if accessed beyond the encode stage. Hence, updating // thread_data->td->mb.e_mbd.tile_ctx is initialized with common frame // context to avoid NULL pointer access in subsequent stages.
thread_data->td->mb.e_mbd.tile_ctx = cm->fc; while (1) { int current_mi_row = -1; #if CONFIG_MULTITHREAD
pthread_mutex_lock(enc_row_mt_mutex_); #endif
row_mt_exit = enc_row_mt->row_mt_exit; // row_mt_exit check here can be avoided as it is checked after // sync_read_ptr() in encode_sb_row(). However, checking row_mt_exit here, // tries to return before calling the function get_next_job(). if (!row_mt_exit &&
!get_next_job(&cpi->tile_data[cur_tile_id], ¤t_mi_row,
cm->seq_params->mib_size)) { // No jobs are available for the current tile. Query for the status of // other tiles and get the next job if available
switch_tile_and_get_next_job(cm, cpi->tile_data, &cur_tile_id,
¤t_mi_row, &end_of_frame, 0,
fp_block_size);
} #if CONFIG_MULTITHREAD
pthread_mutex_unlock(enc_row_mt_mutex_); #endif // When row_mt_exit is set to true, other workers need not pursue any // further jobs. if (row_mt_exit) {
error_info->setjmp = 0; return 1;
}
// The jmp_buf is valid only for the duration of the function that calls // setjmp(). Therefore, this function must reset the 'setjmp' field to 0 // before it returns. if (setjmp(error_info->jmp)) {
error_info->setjmp = 0; return 0;
}
error_info->setjmp = 1;
// Preallocate the pc_tree for realtime coding to reduce the cost of memory // allocation. if (cpi->sf.rt_sf.use_nonrd_pick_mode) {
thread_data->td->pc_root = av1_alloc_pc_tree_node(cm->seq_params->sb_size); if (!thread_data->td->pc_root)
aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR, "Failed to allocate PC_TREE");
} else {
thread_data->td->pc_root = NULL;
}
for (t = thread_data->start; t < tile_rows * tile_cols;
t += cpi->mt_info.num_workers) { int tile_row = t / tile_cols; int tile_col = t % tile_cols;
// Hooks the frame-level (cpi) multi-thread info up to the primary (ppi)
// worker pool: the frame context shares the primary context's workers and
// per-thread data, and each module's worker count is capped by the total
// number of workers available to this frame.
void av1_init_frame_mt(AV1_PRIMARY *ppi, AV1_COMP *cpi) {
  MultiThreadInfo *const mt_info = &cpi->mt_info;
  PrimaryMultiThreadInfo *const p_mt_info = &ppi->p_mt_info;

  mt_info->workers = p_mt_info->workers;
  mt_info->num_workers = p_mt_info->num_workers;
  mt_info->tile_thr_data = p_mt_info->tile_thr_data;

  for (int mod = MOD_FP; mod < NUM_MT_MODULES; mod++) {
    mt_info->num_mod_workers[mod] =
        AOMMIN(mt_info->num_workers, p_mt_info->num_mod_workers[mod]);
  }
}
void av1_init_cdef_worker(AV1_COMP *cpi) { // The allocation is done only for level 0 parallel frames. No change // in config is supported in the middle of a parallel encode set, since the // rest of the MT modules also do not support dynamic change of config. if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) return;
PrimaryMultiThreadInfo *const p_mt_info = &cpi->ppi->p_mt_info; int num_cdef_workers = av1_get_num_mod_workers_for_alloc(p_mt_info, MOD_CDEF);
// Computes the number of workers to be considered while allocating memory for a // multi-threaded module under FPMT. int av1_get_num_mod_workers_for_alloc(const PrimaryMultiThreadInfo *p_mt_info,
MULTI_THREADED_MODULES mod_name) { int num_mod_workers = p_mt_info->num_mod_workers[mod_name]; if (p_mt_info->num_mod_workers[MOD_FRAME_ENC] > 1) { // TODO(anyone): Change num_mod_workers to num_mod_workers[MOD_FRAME_ENC]. // As frame parallel jobs will only perform multi-threading for the encode // stage, we can limit the allocations according to num_enc_workers per // frame parallel encode(a.k.a num_mod_workers[MOD_FRAME_ENC]).
num_mod_workers = p_mt_info->num_workers;
} return num_mod_workers;
}
// Set up shared coeff buffers.
av1_setup_shared_coeff_buffer(&ppi->seq_params, &td->shared_coeff_buf,
&ppi->error);
AOM_CHECK_MEM_ERROR(&ppi->error, td->tmp_conv_dst,
aom_memalign(32, MAX_SB_SIZE * MAX_SB_SIZE * sizeof(*td->tmp_conv_dst)));
if (i < p_mt_info->num_mod_workers[MOD_FP]) { // Set up firstpass PICK_MODE_CONTEXT.
td->firstpass_ctx =
av1_alloc_pmc(ppi->cpi, BLOCK_16X16, &td->shared_coeff_buf); if (!td->firstpass_ctx)
aom_internal_error(&ppi->error, AOM_CODEC_MEM_ERROR, "Failed to allocate PICK_MODE_CONTEXT");
}
if (!is_first_pass && i < num_enc_workers) { // Set up sms_tree. if (av1_setup_sms_tree(ppi->cpi, td)) {
aom_internal_error(&ppi->error, AOM_CODEC_MEM_ERROR, "Failed to allocate SMS tree");
}
for (int x = 0; x < 2; x++) for (int y = 0; y < 2; y++)
AOM_CHECK_MEM_ERROR(
&ppi->error, td->hash_value_buffer[x][y],
(uint32_t *)aom_malloc(AOM_BUFFER_SIZE_FOR_BLOCK_HASH * sizeof(*td->hash_value_buffer[0][0])));
// Allocate buffers used by palette coding mode.
AOM_CHECK_MEM_ERROR(&ppi->error, td->palette_buffer,
aom_memalign(16, sizeof(*td->palette_buffer)));
// The buffers 'tmp_pred_bufs[]', 'comp_rd_buffer' and 'obmc_buffer' are // used in inter frames to store intermediate inter mode prediction // results and are not required for allintra encoding mode. Hence, the // memory allocations for these buffers are avoided for allintra // encoding mode. if (ppi->cpi->oxcf.kf_cfg.key_freq_max != 0) {
alloc_obmc_buffers(&td->obmc_buffer, &ppi->error);
thread_data->thread_id = i; // Set the starting tile for each thread.
thread_data->start = i;
if (i > 0) { // Create threads if (!winterface->reset(worker))
aom_internal_error(&ppi->error, AOM_CODEC_ERROR, "Tile encoder thread creation failed");
}
winterface->sync(worker);
++p_mt_info->num_workers;
}
}
// This function will change the state and free the mutex of corresponding // workers and terminate the object. The object can not be re-used unless a call // to reset() is made. void av1_terminate_workers(AV1_PRIMARY *ppi) {
PrimaryMultiThreadInfo *const p_mt_info = &ppi->p_mt_info; for (int t = 0; t < p_mt_info->num_workers; ++t) {
AVxWorker *const worker = &p_mt_info->workers[t];
aom_get_worker_interface()->end(worker);
}
}
// This function returns 1 if frame parallel encode is supported for // the current configuration. Returns 0 otherwise. staticinlineint is_fpmt_config(const AV1_PRIMARY *ppi, const AV1EncoderConfig *oxcf) { // FPMT is enabled for AOM_Q and AOM_VBR. // TODO(Tarun): Test and enable resize config. if (oxcf->rc_cfg.mode == AOM_CBR || oxcf->rc_cfg.mode == AOM_CQ) { return 0;
} if (ppi->use_svc) { return 0;
} if (oxcf->tile_cfg.enable_large_scale_tile) { return 0;
} if (oxcf->dec_model_cfg.timing_info_present) { return 0;
} if (oxcf->mode != GOOD) { return 0;
} if (oxcf->tool_cfg.error_resilient_mode) { return 0;
} if (oxcf->resize_cfg.resize_mode) { return 0;
} if (oxcf->pass != AOM_RC_SECOND_PASS) { return 0;
} if (oxcf->max_threads < 2) { return 0;
} if (!oxcf->fp_mt) { return 0;
}
return 1;
}
int av1_check_fpmt_config(AV1_PRIMARY *const ppi, const AV1EncoderConfig *const oxcf) { if (is_fpmt_config(ppi, oxcf)) return 1; // Reset frame parallel configuration for unsupported config if (ppi->num_fp_contexts > 1) { for (int i = 1; i < ppi->num_fp_contexts; i++) { // Release the previously-used frame-buffer if (ppi->parallel_cpi[i]->common.cur_frame != NULL) {
--ppi->parallel_cpi[i]->common.cur_frame->ref_count;
ppi->parallel_cpi[i]->common.cur_frame = NULL;
}
}
// A large value for threads used to compute the max num_enc_workers // possible for each resolution. #define MAX_THREADS 100
// Computes the max number of enc workers possible for each resolution. staticinlineint compute_max_num_enc_workers(
CommonModeInfoParams *const mi_params, int mib_size_log2) { int num_sb_rows = CEIL_POWER_OF_TWO(mi_params->mi_rows, mib_size_log2); int num_sb_cols = CEIL_POWER_OF_TWO(mi_params->mi_cols, mib_size_log2);
// Computes the number of frame parallel(fp) contexts to be created // based on the number of max_enc_workers. int av1_compute_num_fp_contexts(AV1_PRIMARY *ppi, AV1EncoderConfig *oxcf) {
ppi->p_mt_info.num_mod_workers[MOD_FRAME_ENC] = 0; if (!av1_check_fpmt_config(ppi, oxcf)) { return 1;
} int max_num_enc_workers = compute_max_num_enc_workers(
&ppi->cpi->common.mi_params, ppi->cpi->common.seq_params->mib_size_log2); // Scaling factors and rounding factors used to tune worker_per_frame // computation. int rounding_factor[2] = { 2, 4 }; int scaling_factor[2] = { 4, 8 }; int is_480p_or_lesser =
AOMMIN(oxcf->frm_dim_cfg.width, oxcf->frm_dim_cfg.height) <= 480; int is_sb_64 = 0; if (ppi->cpi != NULL)
is_sb_64 = ppi->cpi->common.seq_params->sb_size == BLOCK_64X64; // A parallel frame encode has at least 1/4th the // theoretical limit of max enc workers in default case. For resolutions // larger than 480p, if SB size is 64x64, optimal performance is obtained with // limit of 1/8. int index = (!is_480p_or_lesser && is_sb_64) ? 1 : 0; int workers_per_frame =
AOMMAX(1, (max_num_enc_workers + rounding_factor[index]) /
scaling_factor[index]); int max_threads = oxcf->max_threads; int num_fp_contexts = max_threads / workers_per_frame; // Based on empirical results, FPMT gains with multi-tile are significant when // more parallel frames are available. Use FPMT with multi-tile encode only // when sufficient threads are available for parallel encode of // MAX_PARALLEL_FRAMES frames. if (oxcf->tile_cfg.tile_columns > 0 || oxcf->tile_cfg.tile_rows > 0) { if (num_fp_contexts < MAX_PARALLEL_FRAMES) num_fp_contexts = 1;
}
// Computes the number of workers to process each of the parallel frames.
static inline int compute_num_workers_per_frame(
    const int num_workers, const int parallel_frame_count) {
  // Number of level 2 workers per frame context (floor division).
  int workers_per_frame = (num_workers / parallel_frame_count);
  return workers_per_frame;
}
staticinlinevoid restore_workers_after_fpmt(AV1_PRIMARY *ppi, int parallel_frame_count, int num_fpmt_workers_prepared);
// Prepare level 1 workers. This function is only called for // parallel_frame_count > 1. This function populates the mt_info structure of // frame level contexts appropriately by dividing the total number of available // workers amongst the frames as level 2 workers. It also populates the hook and // data members of level 1 workers. staticinlinevoid prepare_fpmt_workers(AV1_PRIMARY *ppi,
AV1_COMP_DATA *first_cpi_data,
AVxWorkerHook hook, int parallel_frame_count) {
assert(parallel_frame_count <= ppi->num_fp_contexts &&
parallel_frame_count > 1);
PrimaryMultiThreadInfo *const p_mt_info = &ppi->p_mt_info; int num_workers = p_mt_info->num_workers;
volatileint frame_idx = 0; volatileint i = 0; while (i < num_workers) { // Assign level 1 worker
AVxWorker *frame_worker = p_mt_info->p_workers[frame_idx] =
&p_mt_info->workers[i];
AV1_COMP *cur_cpi = ppi->parallel_cpi[frame_idx];
MultiThreadInfo *mt_info = &cur_cpi->mt_info; // This 'aom_internal_error_info' pointer is not derived from the local // pointer ('AV1_COMMON *const cm') to silence the compiler warning // "variable 'cm' might be clobbered by 'longjmp' or 'vfork' [-Wclobbered]". struct aom_internal_error_info *const error = cur_cpi->common.error;
// The jmp_buf is valid only within the scope of the function that calls // setjmp(). Therefore, this function must reset the 'setjmp' field to 0 // before it returns. if (setjmp(error->jmp)) {
error->setjmp = 0;
restore_workers_after_fpmt(ppi, parallel_frame_count, i);
aom_internal_error_copy(&ppi->error, error);
}
error->setjmp = 1;
AV1_COMMON *const cm = &cur_cpi->common; // Assign start of level 2 worker pool
mt_info->workers = &p_mt_info->workers[i];
mt_info->tile_thr_data = &p_mt_info->tile_thr_data[i]; // Assign number of workers for each frame in the parallel encode set.
mt_info->num_workers = compute_num_workers_per_frame(
num_workers - i, parallel_frame_count - frame_idx); for (int j = MOD_FP; j < NUM_MT_MODULES; j++) {
mt_info->num_mod_workers[j] =
AOMMIN(mt_info->num_workers, p_mt_info->num_mod_workers[j]);
} if (p_mt_info->cdef_worker != NULL) {
mt_info->cdef_worker = &p_mt_info->cdef_worker[i];
// Back up the original cdef_worker pointers.
mt_info->restore_state_buf.cdef_srcbuf = mt_info->cdef_worker->srcbuf; constint num_planes = av1_num_planes(cm); for (int plane = 0; plane < num_planes; plane++)
mt_info->restore_state_buf.cdef_colbuf[plane] =
mt_info->cdef_worker->colbuf[plane];
} #if !CONFIG_REALTIME_ONLY if (is_restoration_used(cm)) { // Back up the original LR buffers before update. int idx = i + mt_info->num_workers - 1;
assert(idx < mt_info->lr_row_sync.num_workers);
mt_info->restore_state_buf.rst_tmpbuf =
mt_info->lr_row_sync.lrworkerdata[idx].rst_tmpbuf;
mt_info->restore_state_buf.rlbs =
mt_info->lr_row_sync.lrworkerdata[idx].rlbs;
// At this stage, the thread specific CDEF buffers for the current frame's // 'common' and 'cdef_sync' only need to be allocated. 'cdef_worker' has // already been allocated across parallel frames.
av1_alloc_cdef_buffers(cm, &p_mt_info->cdef_worker, &mt_info->cdef_sync,
p_mt_info->num_workers, 0);
int frame_idx = 0; int i = 0; while (i < num_fpmt_workers_prepared) {
AV1_COMP *cur_cpi = ppi->parallel_cpi[frame_idx];
MultiThreadInfo *mt_info = &cur_cpi->mt_info; const AV1_COMMON *const cm = &cur_cpi->common; constint num_planes = av1_num_planes(cm);
// Restore the original cdef_worker pointers. if (p_mt_info->cdef_worker != NULL) {
mt_info->cdef_worker->srcbuf = mt_info->restore_state_buf.cdef_srcbuf; for (int plane = 0; plane < num_planes; plane++)
mt_info->cdef_worker->colbuf[plane] =
mt_info->restore_state_buf.cdef_colbuf[plane];
} #if !CONFIG_REALTIME_ONLY if (is_restoration_used(cm)) { // Restore the original LR buffers. int idx = i + mt_info->num_workers - 1;
assert(idx < mt_info->lr_row_sync.num_workers);
mt_info->lr_row_sync.lrworkerdata[idx].rst_tmpbuf =
mt_info->restore_state_buf.rst_tmpbuf;
mt_info->lr_row_sync.lrworkerdata[idx].rlbs =
mt_info->restore_state_buf.rlbs;
} #endif
frame_idx++;
i += mt_info->num_workers;
}
}
// Synchronize level 1 workers. staticinlinevoid sync_fpmt_workers(AV1_PRIMARY *ppi, int frames_in_parallel_set) { const AVxWorkerInterface *const winterface = aom_get_worker_interface(); int num_workers = ppi->p_mt_info.p_num_workers; int had_error = 0; // Points to error in the earliest display order frame in the parallel set. conststruct aom_internal_error_info *error = NULL;
// Encoding ends. for (int i = num_workers - 1; i >= 0; --i) {
AVxWorker *const worker = ppi->p_mt_info.p_workers[i]; if (!winterface->sync(worker)) {
had_error = 1;
error = ppi->parallel_cpi[i]->common.error;
}
}
// AOM_CODEC_OK(0) means no error. return !status;
}
// This function encodes the raw frame data for each frame in parallel encode // set, and outputs the frame bit stream to the designated buffers. void av1_compress_parallel_frames(AV1_PRIMARY *const ppi,
AV1_COMP_DATA *const first_cpi_data) { // Bitmask for the frame buffers referenced by cpi->scaled_ref_buf // corresponding to frames in the current parallel encode set. int ref_buffers_used_map = 0; int frames_in_parallel_set = av1_init_parallel_frame_context(
first_cpi_data, ppi, &ref_buffers_used_map);
prepare_fpmt_workers(ppi, first_cpi_data, get_compressed_data_hook,
frames_in_parallel_set);
launch_fpmt_workers(ppi);
sync_fpmt_workers(ppi, frames_in_parallel_set);
// Release cpi->scaled_ref_buf corresponding to frames in the current parallel // encode set. for (int i = 0; i < frames_in_parallel_set; ++i) {
av1_release_scaled_references_fpmt(ppi->parallel_cpi[i]);
}
av1_decrement_ref_counts_fpmt(ppi->cpi->common.buffer_pool,
ref_buffers_used_map);
}
// Launches 'num_workers' workers. Worker 0 is the main thread's slot and is
// run synchronously via execute(); all others are launched on their own
// threads. had_error is cleared on each worker before it starts.
static inline void launch_workers(MultiThreadInfo *const mt_info,
                                  int num_workers) {
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
  for (int i = num_workers - 1; i >= 0; i--) {
    AVxWorker *const worker = &mt_info->workers[i];
    worker->had_error = 0;
    if (i == 0)
      winterface->execute(worker);
    else
      winterface->launch(worker);
  }
}
// Waits for all launched encode workers to finish, propagating the first
// error encountered (main thread's error is checked first) to cm->error.
static inline void sync_enc_workers(MultiThreadInfo *const mt_info,
                                    AV1_COMMON *const cm, int num_workers) {
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
  const AVxWorker *const worker_main = &mt_info->workers[0];
  int had_error = worker_main->had_error;
  struct aom_internal_error_info error_info;

  // Read the error_info of main thread.
  if (had_error) {
    error_info = ((EncWorkerData *)worker_main->data1)->error_info;
  }

  // Encoding ends.
  for (int i = num_workers - 1; i > 0; i--) {
    AVxWorker *const worker = &mt_info->workers[i];
    if (!winterface->sync(worker)) {
      had_error = 1;
      error_info = ((EncWorkerData *)worker->data1)->error_info;
    }
  }

  if (had_error) aom_internal_error_copy(cm->error, &error_info);

  // Restore xd->error_info of the main thread back to cm->error so that the
  // multithreaded code, when executed using a single thread, has a valid
  // xd->error_info.
  MACROBLOCKD *const xd = &((EncWorkerData *)worker_main->data1)->td->mb.e_mbd;
  xd->error_info = cm->error;
}
staticinlinevoid accumulate_counters_enc_workers(AV1_COMP *cpi, int num_workers) { for (int i = num_workers - 1; i >= 0; i--) {
AVxWorker *const worker = &cpi->mt_info.workers[i];
EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
cpi->intrabc_used |= thread_data->td->intrabc_used;
cpi->deltaq_used |= thread_data->td->deltaq_used; // Accumulate rtc counters. if (!frame_is_intra_only(&cpi->common))
av1_accumulate_rtc_counters(cpi, &thread_data->td->mb);
cpi->palette_pixel_num += thread_data->td->mb.palette_pixels; if (thread_data->td != &cpi->td) { // Keep these conditional expressions in sync with the corresponding ones // in prepare_enc_workers(). if (cpi->sf.inter_sf.mv_cost_upd_level != INTERNAL_COST_UPD_OFF) {
aom_free(thread_data->td->mv_costs_alloc);
thread_data->td->mv_costs_alloc = NULL;
} if (cpi->sf.intra_sf.dv_cost_upd_level != INTERNAL_COST_UPD_OFF) {
aom_free(thread_data->td->dv_costs_alloc);
thread_data->td->dv_costs_alloc = NULL;
}
}
av1_dealloc_mb_data(&thread_data->td->mb, av1_num_planes(&cpi->common));
// Before encoding a frame, copy the thread data from cpi. if (thread_data->td != &cpi->td) {
thread_data->td->mb = cpi->td.mb;
thread_data->td->rd_counts = cpi->td.rd_counts;
thread_data->td->mb.obmc_buffer = thread_data->td->obmc_buffer;
for (int x = 0; x < 2; x++) { for (int y = 0; y < 2; y++) {
memcpy(thread_data->td->hash_value_buffer[x][y],
cpi->td.mb.intrabc_hash_info.hash_value_buffer[x][y],
AOM_BUFFER_SIZE_FOR_BLOCK_HASH * sizeof(*thread_data->td->hash_value_buffer[0][0]));
thread_data->td->mb.intrabc_hash_info.hash_value_buffer[x][y] =
thread_data->td->hash_value_buffer[x][y];
}
} // Keep these conditional expressions in sync with the corresponding ones // in accumulate_counters_enc_workers(). if (cpi->sf.inter_sf.mv_cost_upd_level != INTERNAL_COST_UPD_OFF) {
CHECK_MEM_ERROR(
cm, thread_data->td->mv_costs_alloc,
(MvCosts *)aom_malloc(sizeof(*thread_data->td->mv_costs_alloc)));
thread_data->td->mb.mv_costs = thread_data->td->mv_costs_alloc;
memcpy(thread_data->td->mb.mv_costs, cpi->td.mb.mv_costs, sizeof(MvCosts));
} if (cpi->sf.intra_sf.dv_cost_upd_level != INTERNAL_COST_UPD_OFF) { // Reset dv_costs to NULL for worker threads when dv cost update is // enabled so that only dv_cost_upd_level needs to be checked before the // aom_free() call for the same.
thread_data->td->mb.dv_costs = NULL; if (av1_need_dv_costs(cpi)) {
CHECK_MEM_ERROR(cm, thread_data->td->dv_costs_alloc,
(IntraBCMVCosts *)aom_malloc( sizeof(*thread_data->td->dv_costs_alloc)));
thread_data->td->mb.dv_costs = thread_data->td->dv_costs_alloc;
memcpy(thread_data->td->mb.dv_costs, cpi->td.mb.dv_costs, sizeof(IntraBCMVCosts));
}
}
}
av1_alloc_mb_data(cpi, &thread_data->td->mb);
/* NOTE(review): stray non-code text (a German website disclaimer, likely
 * residue from the page this file was extracted from) appeared here as bare
 * prose, which is not valid C. Preserved below as a comment:
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */