/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <limits.h>
#include <stdio.h>
/* activity_avg must be positive, or flat regions could get a zero weight
 * (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 * vp8_activity_masking().
 */
#define VP8_ACTIVITY_AVG_MIN (64)

/* This is used as a reference when computing the source variance for the
 * purposes of activity masking.
 * Eventually this should be replaced by custom no-reference routines,
 * which will be faster.
 * All 16 entries are the mid-gray value 128 so that a variance against this
 * buffer measures deviation from a flat block.
 */
static const unsigned char VP8_VAR_OFFS[16] = { 128, 128, 128, 128, 128, 128,
                                                128, 128, 128, 128, 128, 128,
                                                128, 128, 128, 128 };
/* Original activity measure from Tim T's code.
 * Returns a scaled variance of the current 16x16 source macroblock against a
 * flat mid-gray (128) reference, clamped downward for flat regions.
 */
static unsigned int tt_activity_measure(MACROBLOCK *x) {
  unsigned int act;
  unsigned int sse;
  /* TODO: This could also be done over smaller areas (8x8), but that would
   * require extensive changes elsewhere, as lambda is assumed to be fixed
   * over an entire MB in most of the code.
   * Another option is to compute four 8x8 variances, and pick a single
   * lambda using a non-linear combination (e.g., the smallest, or second
   * smallest, etc.).
   */
  /* Variance against the constant VP8_VAR_OFFS buffer (stride 0) acts as a
   * cheap no-reference activity estimate. */
  act = vpx_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0,
                          &sse);
  act = act << 4;

  /* If the region is flat, lower the activity some more.
   * (Shift binds tighter than '<', so these compare against 8<<12 and 5<<12;
   * parentheses added for readability only.) */
  if (act < (8 << 12)) act = act < (5 << 12) ? act : (5 << 12);

  return act;
}
/* Measure the activity of the current macroblock * What we measure here is TBD so abstracted to this function
*/ #define ALT_ACT_MEASURE 1 staticunsignedint mb_activity_measure(MACROBLOCK *x, int mb_row, int mb_col) { unsignedint mb_activity;
if (ALT_ACT_MEASURE) { int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
/* Or use an alternative. */
mb_activity = vp8_encode_intra(x, use_dc_pred);
} else { /* Original activity measure from Tim T's code. */
mb_activity = tt_activity_measure(x);
}
if (mb_activity < VP8_ACTIVITY_AVG_MIN) mb_activity = VP8_ACTIVITY_AVG_MIN;
return mb_activity;
}
/* Calculate an "average" mb activity value for the frame */ #define ACT_MEDIAN 0 staticvoid calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) { #if ACT_MEDIAN /* Find median: Simple n^2 algorithm for experimentation */
{ unsignedint median; unsignedint i, j; unsignedint *sortlist; unsignedint tmp;
/* Create a list to sort to */
CHECK_MEM_ERROR(&cpi->common.error, sortlist,
vpx_calloc(sizeof(unsignedint), cpi->common.MBs));
/* Copy map to sort list */
memcpy(sortlist, cpi->mb_activity_map, sizeof(unsignedint) * cpi->common.MBs);
/* Ripple each value down to its correct position */ for (i = 1; i < cpi->common.MBs; ++i) { for (j = i; j > 0; j--) { if (sortlist[j] < sortlist[j - 1]) { /* Swap values */
tmp = sortlist[j - 1];
sortlist[j - 1] = sortlist[j];
sortlist[j] = tmp;
} else break;
}
}
/* Even number MBs so estimate median as mean of two either side. */
median = (1 + sortlist[cpi->common.MBs >> 1] +
sortlist[(cpi->common.MBs >> 1) + 1]) >>
1;
cpi->activity_avg = median;
vpx_free(sortlist);
} #else /* Simple mean for now */
cpi->activity_avg = (unsignedint)(activity_sum / cpi->common.MBs); #endif
if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) {
cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
}
/* Experimental code: return fixed value normalized for several clips */ if (ALT_ACT_MEASURE) cpi->activity_avg = 100000;
}
#if USE_ACT_INDEX /* Calculate and activity index for each mb */ staticvoid calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) {
VP8_COMMON *const cm = &cpi->common; int mb_row, mb_col;
/* Loop through all MBs. Note activity of each, average activity and * calculate a normalized activity for each
*/ staticvoid build_activity_map(VP8_COMP *cpi) {
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *xd = &x->e_mbd;
VP8_COMMON *const cm = &cpi->common;
#if ALT_ACT_MEASURE
YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx]; int recon_yoffset; int recon_y_stride = new_yv12->y_stride; #endif
int mb_row, mb_col; unsignedint mb_activity;
int64_t activity_sum = 0;
/* for each macroblock row in image */ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { #if ALT_ACT_MEASURE /* reset above block coeffs */
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16); #endif /* for each macroblock col in image */ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { #if ALT_ACT_MEASURE
xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
xd->left_available = (mb_col != 0);
recon_yoffset += 16; #endif /* Copy current mb to a buffer */
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
/* Activity based Zbin adjustment */
adjust_act_zbin(cpi, x);
}
staticvoid encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
MACROBLOCK *x, MACROBLOCKD *xd, TOKENEXTRA **tp, int *segment_counts, int *totalrate) { int recon_yoffset, recon_uvoffset; int mb_col; int ref_fb_idx = cm->lst_fb_idx; int dst_fb_idx = cm->new_fb_idx; int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride; int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride; int map_index = (mb_row * cpi->common.mb_cols);
/* Distance of Mb to the top & bottom edges, specified in 1/8th pel * units as they are always compared to values that are in 1/8th pel
*/
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
/* Set up limit values for vertical motion vector components * to prevent them extending beyond the UMV borders
*/
x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
/* Set the mb activity pointer to the start of the row. */
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
/* for each macroblock col in image */ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
*tp = cpi->tok; #endif /* Distance of Mb to the left & right edges, specified in * 1/8th pel units as they are always compared to values * that are in 1/8th pel units
*/
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
/* Set up limit values for horizontal motion vector components * to prevent them extending beyond the UMV borders
*/
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
x->mv_col_max =
((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x);
/* Is segmentation enabled */ /* MB level adjustment to quantizer */ if (xd->segmentation_enabled) { /* Code to set segment id in xd->mbmi.segment_id for current MB * (with range checking)
*/ if (cpi->segmentation_map[map_index + mb_col] <= 3) {
xd->mode_info_context->mbmi.segment_id =
cpi->segmentation_map[map_index + mb_col];
} else {
xd->mode_info_context->mbmi.segment_id = 0;
}
vp8cx_mb_init_quantizer(cpi, x, 1);
} else { /* Set to Segment 0 by default */
xd->mode_info_context->mbmi.segment_id = 0;
}
for (b = 0; b < xd->mbmi.partition_count; ++b) {
inter_b_modes[x->partition->bmi[b].mode]++;
}
}
#endif
// Keep track of how many (consecutive) times a block is coded // as ZEROMV_LASTREF, for base layer frames. // Reset to 0 if its coded as anything else. if (cpi->current_layer == 0) { if (xd->mode_info_context->mbmi.mode == ZEROMV &&
xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) { // Increment, check for wrap-around. if (cpi->consec_zero_last[map_index + mb_col] < 255) {
cpi->consec_zero_last[map_index + mb_col] += 1;
} if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
}
} else {
cpi->consec_zero_last[map_index + mb_col] = 0;
cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
} if (x->zero_last_dot_suppress) {
cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
}
}
/* Special case code for cyclic refresh * If cyclic update enabled then copy xd->mbmi.segment_id; (which * may have been updated based on mode during * vp8cx_encode_inter_macroblock()) back into the global * segmentation map
*/ if ((cpi->current_layer == 0) &&
(cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)) {
cpi->segmentation_map[map_index + mb_col] =
xd->mode_info_context->mbmi.segment_id;
/* If the block has been refreshed mark it as clean (the * magnitude of the -ve influences how long it will be before * we consider another refresh): * Else if it was coded (last frame 0,0) and has not already * been refreshed then mark it as a candidate for cleanup * next time (marked 0) else mark it as dirty (1).
*/ if (xd->mode_info_context->mbmi.segment_id) {
cpi->cyclic_refresh_map[map_index + mb_col] = -1;
} elseif ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
(xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) { if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
cpi->cyclic_refresh_map[map_index + mb_col] = 0;
}
} else {
cpi->cyclic_refresh_map[map_index + mb_col] = 1;
}
}
}
cpi->tplist[mb_row].stop = *tp;
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING /* pack tokens for this MB */
{ int tok_count = *tp - tp_start;
vp8_pack_tokens(w, tp_start, tok_count);
} #endif /* Increment pointer into gf usage flags structure. */
x->gf_active_ptr++;
/* Increment the activity mask pointers. */
x->mb_activity_ptr++;
/* adjust to the next column of macroblocks */
x->src.y_buffer += 16;
x->src.u_buffer += 8;
x->src.v_buffer += 8;
recon_yoffset += 16;
recon_uvoffset += 8;
/* Keep track of segment usage */
segment_counts[xd->mode_info_context->mbmi.segment_id]++;
/* skip to next mb */
xd->mode_info_context++;
x->partition_info++;
xd->above_context++;
}
/* extend the recon for intra prediction */
vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
/* reset intra mode contexts */ if (cm->frame_type == KEY_FRAME) vp8_init_mbmode_probs(cm);
/* Copy data over into macro block data structures. */
x->src = *cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
/* set up frame for intra coded blocks */
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
#if CONFIG_MULTITHREAD
/* Accumulate a worker thread's coefficient token counts into the main
 * macroblock context, so frame-level probabilities can be computed after
 * multithreaded encoding. Iterates every
 * [block type][coef band][context][token] bin. */
static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread) {
  int i = 0;
  do {
    int j = 0;
    do {
      int k = 0;
      do {
        /* at every context */

        /* calc probs and branch cts for this frame only */
        int t = 0; /* token/prob index */
        do {
          x->coef_counts[i][j][k][t] += x_thread->coef_counts[i][j][k][t];
        } while (++t < ENTROPY_NODES);
      } while (++k < PREV_COEF_CONTEXTS);
    } while (++j < COEF_BANDS);
  } while (++i < BLOCK_TYPES);
}
#endif  // CONFIG_MULTITHREAD
void vp8_encode_frame(VP8_COMP *cpi) { int mb_row;
MACROBLOCK *const x = &cpi->mb;
VP8_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
TOKENEXTRA *tp = cpi->tok; int segment_counts[MAX_MB_SEGMENTS]; int totalrate; #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
BOOL_CODER *bc = &cpi->bc[1]; /* bc[0] is for control partition */ constint num_part = (1 << cm->multi_token_partition); #endif
// Work out the segment probabilities if segmentation is enabled // and needs to be updated if (xd->segmentation_enabled && xd->update_mb_segmentation_map) { int tot_count; int i;
/* Set to defaults */
memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
/* Zero probabilities not allowed */ for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) { if (xd->mb_segment_tree_probs[i] == 0) xd->mb_segment_tree_probs[i] = 1;
}
}
}
/* projected_frame_size in units of BYTES */
cpi->projected_frame_size = totalrate >> 8;
/* Make a note of the percentage MBs coded Intra. */ if (cm->frame_type == KEY_FRAME) {
cpi->this_frame_percent_intra = 100;
} else { int tot_modes;
#if !CONFIG_REALTIME_ONLY /* Adjust the projected reference frame usage probability numbers to * reflect what we have just seen. This may be useful when we make * multiple iterations of the recode loop rather than continuing to use * values from the previous frame.
*/ if ((cm->frame_type != KEY_FRAME) &&
((cpi->oxcf.number_of_layers > 1) ||
(!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame))) {
vp8_convert_rfct_to_prob(cpi);
} #endif
}

/* Wire each of the 25 BLOCKD entries to its slice of the per-MB difference
 * and coefficient buffers:
 *   blocks  0..15 : 16 luma 4x4 blocks of the 16x16 Y plane (offset 0),
 *   blocks 16..19 : four U 4x4 blocks starting at offset 256,
 *   blocks 20..23 : four V 4x4 blocks starting at offset 320,
 *   block      24 : the second-order (Y2) block at offset 384.
 * Each block also gets a 16-coefficient slice of x->coeff. */
void vp8_setup_block_ptrs(MACROBLOCK *x) {
  int r, c;
  int i;

  /* Y: 4x4 grid of 4x4 blocks, row stride 16 pixels. */
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) {
      x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
    }
  }

  /* U: 2x2 grid of 4x4 blocks, row stride 8 pixels. */
  for (r = 0; r < 2; ++r) {
    for (c = 0; c < 2; ++c) {
      x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
    }
  }

  /* V: 2x2 grid of 4x4 blocks, row stride 8 pixels. */
  for (r = 0; r < 2; ++r) {
    for (c = 0; c < 2; ++c) {
      x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
    }
  }

  /* Y2 (second order) block. */
  x->block[24].src_diff = x->src_diff + 384;

  /* 16 coefficients per block. */
  for (i = 0; i < 25; ++i) {
    x->block[i].coeff = x->coeff + i * 16;
  }
}
void vp8_build_block_offsets(MACROBLOCK *x) { int block = 0; int br, bc;
int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset, int mb_row, int mb_col) {
MACROBLOCKD *const xd = &x->e_mbd; int intra_error = 0; int rate; int distortion;
#if CONFIG_TEMPORAL_DENOISING /* Reset the best sse mode/mv for each macroblock. */
x->best_reference_frame = INTRA_FRAME;
x->best_zeromv_reference_frame = INTRA_FRAME;
x->best_sse_inter_mode = 0;
x->best_sse_mv.as_int = 0;
x->need_to_clamp_best_mvs = 0; #endif
if (cpi->sf.RD) { int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
/* Are we using the fast quantizer for the mode selection? */ if (cpi->sf.use_fastquant_for_pick) {
x->quantize_b = vp8_fast_quantize_b;
/* the fast quantizer does not use zbin_extra, so
* do not recalculate */
x->zbin_mode_boost_enabled = 0;
}
vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
&distortion, &intra_error, mb_row, mb_col);
/* switch back to the regular quantizer for the encode */ if (cpi->sf.improved_quant) {
x->quantize_b = vp8_regular_quantize_b;
}
if (xd->mode_info_context->mbmi.mode != B_PRED) {
vp8_inverse_transform_mby(xd);
}
vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs + 16);
} else { /* always set mb_skip_coeff as it is needed by the loopfilter */
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
/* NOTE(review): unrelated web-page disclaimer text (German) was pasted here
 * during extraction and has been removed; the enclosing function body above
 * is truncated at this point. */