/* * Copyright (c) 2010 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
*/ #include <stddef.h>
/* Distance of the MB to the various image edges.
 * These are specified in 1/8th-pel units, as they are always compared
 * against motion vector values that are also in 1/8th-pel units.
 * Each macroblock is 16 pixels wide/high, hence the *16; <<3 converts
 * whole pixels to 1/8th-pel.
 */
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
/* Set up limit values for the motion vectors, used to prevent them
 * extending outside the UMV (unrestricted motion vector) borders.
 * VP8BORDERINPIXELS is the padded border around the frame; 16 is
 * subtracted so a full 16x16 MB prediction still fits in the border.
 */
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
x->mv_col_max =
    ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
x->mv_row_max =
    ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
/* Copy the current MB's 16x16 luma samples into a contiguous working
 * buffer (x->thismb) so later stages can read it with a fixed stride. */
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

/* When tuning for SSIM, apply activity masking for this MB. */
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x);
/* Is segmentation enabled?  If so, apply an MB-level adjustment to the
 * quantizer based on the segment this MB belongs to. */
if (xd->segmentation_enabled) {
  /* Set the segment id in xd->mbmi.segment_id for the current MB,
   * with range checking: valid segment ids are 0..3, anything else
   * in the map falls back to segment 0. */
  if (cpi->segmentation_map[map_index + mb_col] <= 3) {
    xd->mode_info_context->mbmi.segment_id =
        cpi->segmentation_map[map_index + mb_col];
  } else {
    xd->mode_info_context->mbmi.segment_id = 0;
  }

  vp8cx_mb_init_quantizer(cpi, x, 1);
} else {
  /* Segmentation disabled: every MB is in segment 0 by default. */
  xd->mode_info_context->mbmi.segment_id = 0;
}
/* Tally sub-block (partition) mode usage for this inter-coded MB. */
for (b = 0; b < xd->mbmi.partition_count; ++b) {
  inter_b_modes[x->partition->bmi[b].mode]++;
}
}
#endif
// Keep track of how many (consecutive) times a block is coded as
// ZEROMV with the LAST reference frame, for base layer frames.
// Reset to 0 if it is coded as anything else.
if (cpi->current_layer == 0) {
  if (xd->mode_info_context->mbmi.mode == ZEROMV &&
      xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
    // Increment; the 255 check prevents wrap-around of the 8-bit count.
    if (cpi->consec_zero_last[map_index + mb_col] < 255) {
      cpi->consec_zero_last[map_index + mb_col] += 1;
    }
    if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
      cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
    }
  } else {
    cpi->consec_zero_last[map_index + mb_col] = 0;
    cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
  }
  // The mv-bias counter is additionally reset when dot suppression
  // is active for this block.
  if (x->zero_last_dot_suppress) {
    cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
  }
}
/* Special case code for cyclic refresh.
 * If cyclic update is enabled then copy xd->mbmi.segment_id (which may
 * have been updated based on mode during vp8cx_encode_inter_macroblock())
 * back into the global segmentation map.
 */
if ((cpi->current_layer == 0) &&
    (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)) {
  const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
  cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id;

  /* If the block has been refreshed mark it as clean (the magnitude of
   * the -ve influences how long it will be before we consider another
   * refresh).
   * Else if it was coded (last frame 0,0) and has not already been
   * refreshed then mark it as a candidate for cleanup next time
   * (marked 0); else mark it as dirty (1).
   */
  if (mbmi->segment_id) {
    cpi->cyclic_refresh_map[map_index + mb_col] = -1;
  } else if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME)) {
    /* Fixed: original had the invalid token `elseif` (not a C keyword),
     * which would fail to compile. */
    if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
      cpi->cyclic_refresh_map[map_index + mb_col] = 0;
    }
  } else {
    cpi->cyclic_refresh_map[map_index + mb_col] = 1;
  }
}
}

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
/* Pack the tokens generated for this MB straight into the bitstream. */
{
  int tok_count = tp - tp_start;
  vp8_pack_tokens(w, tp_start, tok_count);
}
#else
/* Record where this row's token list ends, for packing later. */
cpi->tplist[mb_row].stop = tp;
#endif

/* Increment pointer into gf usage flags structure. */
x->gf_active_ptr++;

/* Increment the activity mask pointers. */
x->mb_activity_ptr++;

/* Adjust source pointers and reconstruction offsets to the next column
 * of macroblocks: 16 pixels for luma, 8 for each chroma plane. */
x->src.y_buffer += 16;
x->src.u_buffer += 8;
x->src.v_buffer += 8;

recon_yoffset += 16;
recon_uvoffset += 8;
/* Keep track of segment usage for this frame. */
segment_counts[xd->mode_info_context->mbmi.segment_id]++;

/* Skip to the next MB: advance the per-MB context pointers. */
xd->mode_info_context++;
x->partition_info++;
xd->above_context++;
}
#if 1
/* TODO: Remove dequant from BLOCKD.  This is a temporary solution until
 * the quantizer code uses a passed-in pointer to the dequant constants.
 * This will also require modifications to the x86 and neon assembly.
 *
 * Blocks 0..15 use the luma (Y1) dequant table, 16..23 the chroma (U/V)
 * table, and block 24 the second-order (Y2) table — per the dequant_*
 * member names.
 */
for (i = 0; i < 16; ++i) zd->block[i].dequant = zd->dequant_y1;
for (i = 16; i < 24; ++i) zd->block[i].dequant = zd->dequant_uv;
zd->block[24].dequant = zd->dequant_y2;
#endif
/* Don't allocate more threads than cores available: keep one core for
 * the main thread. */
if (cpi->oxcf.multi_threaded > cm->processor_core_count) {
  th_count = cm->processor_core_count - 1;
}

/* We have th_count + 1 (main) threads processing one row each, so
 * there is no point in having more threads than the sync range allows. */
if (th_count > ((cm->mb_cols / cpi->mt_sync_range) - 1)) {
  th_count = (cm->mb_cols / cpi->mt_sync_range) - 1;
}
}

/* Thread count unchanged: nothing to (re)create. */
if (th_count == cpi->encoding_thread_count) return 0;

/* Tear down the existing worker threads before creating the new set. */
vp8cx_remove_encoder_threads(cpi);

if (th_count != 0) {
  int ithread;
  int rc = 0;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.