/* * Copyright (c) 2014 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
*/
// Accumulate the rate-distortion statistics gathered by a worker thread
// (td_t) into the main thread's data (td) after a parallel encode pass.
// Sums the compound-prediction diffs, the switchable-filter diffs, and the
// full six-dimensional coefficient token counts.
static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
  int i, j, k, l, m, n;

  for (i = 0; i < REFERENCE_MODES; i++)
    td->rd_counts.comp_pred_diff[i] += td_t->rd_counts.comp_pred_diff[i];

  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
    td->rd_counts.filter_diff[i] += td_t->rd_counts.filter_diff[i];

  for (i = 0; i < TX_SIZES; i++)
    for (j = 0; j < PLANE_TYPES; j++)
      for (k = 0; k < REF_TYPES; k++)
        for (l = 0; l < COEF_BANDS; l++)
          for (m = 0; m < COEFF_CONTEXTS; m++)
            for (n = 0; n < ENTROPY_TOKENS; n++)
              td->rd_counts.coef_counts[i][j][k][l][m][n] +=
                  td_t->rd_counts.coef_counts[i][j][k][l][m][n];
}
// NOTE(review): This block is a garbled/truncated copy of
// create_enc_workers(): tokens are fused ("staticvoid", several statements
// collapsed onto single lines), and the loop that allocates the worker
// array and per-worker thread data — including the declarations of
// `worker` and `thread_data` used below — is missing between the dealloc
// calls and the error-handling tail, so the braces do not balance.
// Restore this function from the upstream source rather than patching it
// piecemeal.
staticvoid create_enc_workers(VP9_COMP *cpi, int num_workers) {
VP9_COMMON *const cm = &cpi->common; const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); int i; // While using SVC, we need to allocate threads according to the highest // resolution. When row based multithreading is enabled, it is OK to // allocate more threads than the number of max tile columns. if (cpi->use_svc && !cpi->row_mt) { int max_tile_cols = get_max_tile_cols(cpi);
num_workers = VPXMIN(cpi->oxcf.max_threads, max_tile_cols);
}
assert(num_workers > 0); if (num_workers == cpi->num_workers) return;
vp9_loop_filter_dealloc(&cpi->lf_row_sync);
vp9_bitstream_encode_tiles_buffer_dealloc(cpi);
vp9_encode_free_mt_data(cpi);
// NOTE(review): the worker-creation loop is missing here; the lines below
// are orphaned fragments of its body (`worker` and `thread_data` are
// never declared in this view).
// Create threads if (!winterface->reset(worker))
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Tile encoder thread creation failed");
} else { // Main thread acts as a worker and uses the thread data in cpi.
thread_data->cpi = cpi;
thread_data->td = &cpi->td;
}
winterface->sync(worker);
}
}
// Dispatch `hook` to num_workers encode workers and wait for all of them
// to finish. The per-worker payload is cpi->tile_thr_data[i] (data1) plus
// the shared `data2`. The last worker runs synchronously on the calling
// thread via execute(); the others run asynchronously via launch(). All
// workers are joined via sync() before returning.
static void launch_enc_workers(VP9_COMP *cpi, VPxWorkerHook hook, void *data2,
                               int num_workers) {
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  int i;

  // Install the hook and per-worker payloads before any worker starts.
  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    worker->hook = hook;
    worker->data1 = &cpi->tile_thr_data[i];
    worker->data2 = data2;
  }

  // Encode a frame.
  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Set the starting tile for each thread.
    thread_data->start = i;

    if (i == cpi->num_workers - 1)
      winterface->execute(worker);  // Main thread doubles as the last worker.
    else
      winterface->launch(worker);
  }

  // Encoding ends: wait for every worker to complete.
  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    winterface->sync(worker);
  }
}
// NOTE(review): This block is corrupted. It begins as
// vp9_encode_free_mt_data() (iterating over cpi->num_workers to free
// per-worker resources), but after two declarations it splices in an
// unrelated fragment of per-frame thread-data setup (which uses
// undeclared `i` and `num_workers`), and the block ends mid-scope with
// unbalanced braces. Restore both functions from the upstream source.
void vp9_encode_free_mt_data(struct VP9_COMP *cpi) { int t; for (t = 0; t < cpi->num_workers; ++t) {
VPxWorker *const worker = &cpi->workers[t];
EncWorkerData *const thread_data = &cpi->tile_thr_data[t];
// NOTE(review): from here on the code belongs to a different function
// (thread-data copy performed before encoding a frame), not to
// vp9_encode_free_mt_data().
for (i = 0; i < num_workers; i++) {
EncWorkerData *const thread_data = &cpi->tile_thr_data[i];
// Before encoding a frame, copy the thread data from cpi. if (thread_data->td != &cpi->td) {
thread_data->td->mb = cpi->td.mb;
thread_data->td->rd_counts = cpi->td.rd_counts;
} if (thread_data->td->counts != &cpi->common.counts) {
memcpy(thread_data->td->counts, &cpi->common.counts, sizeof(cpi->common.counts));
}
// Handle use_nonrd_pick_mode case. if (cpi->sf.use_nonrd_pick_mode) {
MACROBLOCK *const x = &thread_data->td->mb;
MACROBLOCKD *const xd = &x->e_mbd; struct macroblock_plane *const p = x->plane; struct macroblockd_plane *const pd = xd->plane;
PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none; int j;
// Deallocate row based multi-threading synchronization related mutex and data void vp9_row_mt_sync_mem_dealloc(VP9RowMTSync *row_mt_sync) { if (row_mt_sync != NULL) { #if CONFIG_MULTITHREAD int i;
if (row_mt_sync->mutex != NULL) { for (i = 0; i < row_mt_sync->rows; ++i) {
pthread_mutex_destroy(&row_mt_sync->mutex[i]);
}
vpx_free(row_mt_sync->mutex);
} if (row_mt_sync->cond != NULL) { for (i = 0; i < row_mt_sync->rows; ++i) {
pthread_cond_destroy(&row_mt_sync->cond[i]);
}
vpx_free(row_mt_sync->cond);
} #endif// CONFIG_MULTITHREAD
vpx_free(row_mt_sync->cur_col); // clear the structure as the source of this call may be dynamic change // in tiles in which case this call will be followed by an _alloc() // which may fail.
vp9_zero(*row_mt_sync);
}
}
// NOTE(review): vp9_row_mt_sync_read() is truncated here — only its
// opening line survives (with the fused token "constint"); the
// condition-wait body and closing brace are missing. Restore from the
// upstream source.
void vp9_row_mt_sync_read(VP9RowMTSync *const row_mt_sync, int r, int c) { #if CONFIG_MULTITHREAD constint nsync = row_mt_sync->sync_range;
// No-op stand-in for vp9_row_mt_sync_read() used when row-based
// multi-threaded synchronization is not in effect; matches its signature
// so the two are interchangeable as function pointers. All arguments are
// deliberately unused.
void vp9_row_mt_sync_read_dummy(VP9RowMTSync *const row_mt_sync, int r,
                                int c) {
  (void)row_mt_sync;
  (void)r;
  (void)c;
}
// NOTE(review): vp9_row_mt_sync_write() is corrupted. Its signalling
// logic is present up to the mutex lock, after which an unrelated
// fragment of the row-MT worker hook's job loop is spliced in; the
// cond_signal/unlock tail and the closing braces are missing. Also note
// the fused tokens ("constint") and the mid-line "#if" directive, which
// is not valid preprocessor placement. Restore from the upstream source.
void vp9_row_mt_sync_write(VP9RowMTSync *const row_mt_sync, int r, int c, constint cols) { #if CONFIG_MULTITHREAD constint nsync = row_mt_sync->sync_range; int cur; // Only signal when there are enough encoded blocks for next row to run. int sig = 1;
if (c < cols - 1) {
cur = c; if (c % nsync != nsync - 1) sig = 0;
} else {
cur = cols + nsync;
}
if (sig) {
pthread_mutex_lock(&row_mt_sync->mutex[r]);
// NOTE(review): the lines below belong to enc_row_mt_worker_hook()'s job
// queue loop, not to vp9_row_mt_sync_write(); none of these identifiers
// are declared in this scope.
end_of_frame = 0; while (0 == end_of_frame) { // Get the next job in the queue
proc_job =
(JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id); if (NULL == proc_job) { // Query for the status of other tiles
end_of_frame = vp9_get_tiles_proc_status(
multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
tile_cols);
} else {
tile_col = proc_job->tile_col_id;
tile_row = proc_job->tile_row_id;
// NOTE(review): enc_row_mt_worker_hook() is truncated and garbled
// ("staticint" fused). The job-queue loop begins correctly — pull the
// next job for this thread's tile, or poll other tiles' completion when
// the queue is empty — but after computing mi_row an unrelated per-frame
// thread-data setup fragment is spliced in (it uses undeclared `i` and
// `num_workers`), and the function ends mid-scope without its row-encode
// call or return statement. Restore from the upstream source.
staticint enc_row_mt_worker_hook(void *arg1, void *arg2) {
EncWorkerData *const thread_data = (EncWorkerData *)arg1;
MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
VP9_COMP *const cpi = thread_data->cpi; const VP9_COMMON *const cm = &cpi->common; constint tile_cols = 1 << cm->log2_tile_cols; int tile_row, tile_col; int end_of_frame; int thread_id = thread_data->thread_id; int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
JobNode *proc_job = NULL; int mi_row;
end_of_frame = 0; while (0 == end_of_frame) { // Get the next job in the queue
proc_job =
(JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id); if (NULL == proc_job) { // Query for the status of other tiles
end_of_frame = vp9_get_tiles_proc_status(
multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
tile_cols);
} else {
tile_col = proc_job->tile_col_id;
tile_row = proc_job->tile_row_id;
mi_row = proc_job->vert_unit_row_num * MI_BLOCK_SIZE;
// NOTE(review): the lines below belong to a different function
// (per-frame thread-data setup before encoding), not to this worker
// hook.
for (i = 0; i < num_workers; i++) {
EncWorkerData *thread_data;
thread_data = &cpi->tile_thr_data[i]; // Before encoding a frame, copy the thread data from cpi. if (thread_data->td != &cpi->td) {
thread_data->td->mb = cpi->td.mb;
thread_data->td->rd_counts = cpi->td.rd_counts;
} if (thread_data->td->counts != &cpi->common.counts) {
memcpy(thread_data->td->counts, &cpi->common.counts, sizeof(cpi->common.counts));
}
// Handle use_nonrd_pick_mode case. if (cpi->sf.use_nonrd_pick_mode) {
MACROBLOCK *const x = &thread_data->td->mb;
MACROBLOCKD *const xd = &x->e_mbd; struct macroblock_plane *const p = x->plane; struct macroblockd_plane *const pd = xd->plane;
PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none; int j;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.