/* * Copyright (c) 2010 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
*/
// Reads a literal just wide enough to represent 'max' and clamps the decoded
// value so the result never exceeds 'max'.
static int decode_unsigned_max(struct vpx_read_bit_buffer *rb, int max) {
  const int value = vpx_rb_read_literal(rb, get_unsigned_bits(max));
  if (value > max) return max;
  return value;
}
// Reads backward-adaptive updates for the transform-size probability tables,
// one table per maximum transform size (8x8, 16x16, 32x32).
static void read_tx_mode_probs(struct tx_probs *tx_probs, vpx_reader *r) {
  int ctx, sz;

  for (ctx = 0; ctx < TX_SIZE_CONTEXTS; ++ctx)
    for (sz = 0; sz < TX_SIZES - 3; ++sz)
      vp9_diff_update_prob(r, &tx_probs->p8x8[ctx][sz]);

  for (ctx = 0; ctx < TX_SIZE_CONTEXTS; ++ctx)
    for (sz = 0; sz < TX_SIZES - 2; ++sz)
      vp9_diff_update_prob(r, &tx_probs->p16x16[ctx][sz]);

  for (ctx = 0; ctx < TX_SIZE_CONTEXTS; ++ctx)
    for (sz = 0; sz < TX_SIZES - 1; ++sz)
      vp9_diff_update_prob(r, &tx_probs->p32x32[ctx][sz]);
}
// Reads probability updates for the switchable interpolation-filter tree.
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
  int ctx, f;
  for (ctx = 0; ctx < SWITCHABLE_FILTER_CONTEXTS; ++ctx)
    for (f = 0; f < SWITCHABLE_FILTERS - 1; ++f)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[ctx][f]);
}
// Reads probability updates for the inter-prediction-mode tree.
static void read_inter_mode_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
  int ctx, m;
  for (ctx = 0; ctx < INTER_MODE_CONTEXTS; ++ctx)
    for (m = 0; m < INTER_MODES - 1; ++m)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[ctx][m]);
}
// Reads probability updates for the frame's reference mode: the
// compound-vs-single selector, the single-reference probabilities, and the
// compound-reference probabilities, each only when the current reference
// mode can actually make use of them.
static void read_frame_reference_mode_probs(VP9_COMMON *cm, vpx_reader *r) {
  FRAME_CONTEXT *const fc = cm->fc;
  int i;

  if (cm->reference_mode == REFERENCE_MODE_SELECT) {
    for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
      vp9_diff_update_prob(r, &fc->comp_inter_prob[i]);
  }

  if (cm->reference_mode != COMPOUND_REFERENCE) {
    for (i = 0; i < REF_CONTEXTS; ++i) {
      vp9_diff_update_prob(r, &fc->single_ref_prob[i][0]);
      vp9_diff_update_prob(r, &fc->single_ref_prob[i][1]);
    }
  }

  if (cm->reference_mode != SINGLE_REFERENCE) {
    for (i = 0; i < REF_CONTEXTS; ++i)
      vp9_diff_update_prob(r, &fc->comp_ref_prob[i]);
  }
}
// Conditionally updates 'n' motion-vector probabilities: each entry is
// refreshed from the bitstream only when its MV_UPDATE_PROB flag bit is set.
// Replacement values are stored in odd form ((v << 1) | 1).
static void update_mv_probs(vpx_prob *p, int n, vpx_reader *r) {
  int i;
  for (i = 0; i < n; ++i) {
    if (vpx_read(r, MV_UPDATE_PROB)) {
      p[i] = (vpx_read_literal(r, 7) << 1) | 1;
    }
  }
}
staticvoid read_mv_probs(nmv_context *ctx, int allow_hp, vpx_reader *r) { int i, j;
// Copies a b_w x b_h block from the reference frame into 'dst', replicating
// edge pixels wherever the requested block (whose top-left lies at frame
// position (x, y)) falls outside the w x h frame bounds.
static void build_mc_border(const uint8_t *src, int src_stride, uint8_t *dst,
                            int dst_stride, int x, int y, int b_w, int b_h,
                            int w, int h) {
  // 'src' points at block position (x, y); step back to column 0 / row 0 of
  // the frame, then clamp the starting row into [0, h - 1].
  const uint8_t *row = src - x - y * src_stride;
  if (y >= h) {
    row += (h - 1) * src_stride;
  } else if (y > 0) {
    row += y * src_stride;
  }

  do {
    int left = 0, right = 0, copy;
    if (x < 0) left = -x;
    if (left > b_w) left = b_w;
    if (x + b_w > w) right = x + b_w - w;
    if (right > b_w) right = b_w;
    copy = b_w - left - right;

    // Left border: replicate the first frame pixel of this row.
    if (left) memset(dst, row[0], left);
    // Interior: copy real frame pixels.
    if (copy) memcpy(dst + left, row + x + left, copy);
    // Right border: replicate the last frame pixel of this row.
    if (right) memset(dst + left + copy, row[w - 1], right);

    dst += dst_stride;
    ++y;
    // Walk down the frame, sticking on the last valid row once past it.
    if (y > 0 && y < h) row += src_stride;
  } while (--b_h);
}
#if CONFIG_VP9_HIGHBITDEPTH
// High-bit-depth twin of build_mc_border(): identical edge replication, but
// operating on 16-bit pixels (CONVERT_TO_SHORTPTR / vpx_memset16).
static void high_build_mc_border(const uint8_t *src8, int src_stride,
                                 uint16_t *dst, int dst_stride, int x, int y,
                                 int b_w, int b_h, int w, int h) {
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  // 'src' points at block position (x, y); step back to column 0 / row 0 of
  // the frame, then clamp the starting row into [0, h - 1].
  const uint16_t *row = src - x - y * src_stride;
  if (y >= h) {
    row += (h - 1) * src_stride;
  } else if (y > 0) {
    row += y * src_stride;
  }

  do {
    int left = 0, right = 0, copy;
    if (x < 0) left = -x;
    if (left > b_w) left = b_w;
    if (x + b_w > w) right = x + b_w - w;
    if (right > b_w) right = b_w;
    copy = b_w - left - right;

    // Left border: replicate the first frame pixel of this row.
    if (left) vpx_memset16(dst, row[0], left);
    // Interior: copy real frame pixels (16-bit elements).
    if (copy) memcpy(dst + left, row + x + left, copy * sizeof(uint16_t));
    // Right border: replicate the last frame pixel of this row.
    if (right) vpx_memset16(dst + left + copy, row[w - 1], right);

    dst += dst_stride;
    ++y;
    // Walk down the frame, sticking on the last valid row once past it.
    if (y > 0 && y < h) row += src_stride;
  } while (--b_h);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_VP9_HIGHBITDEPTH staticvoid extend_and_predict(TileWorkerData *twd, const uint8_t *buf_ptr1, int pre_buf_stride, int x0, int y0, int b_w, int b_h, int frame_width, int frame_height, int border_offset, uint8_t *const dst, int dst_buf_stride, int subpel_x, int subpel_y, const InterpKernel *kernel, conststruct scale_factors *sf, MACROBLOCKD *xd, int w, int h, int ref, int xs, int ys) {
uint16_t *mc_buf_high = twd->extend_and_predict_buf; if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w, x0, y0,
b_w, b_h, frame_width, frame_height);
highbd_inter_predictor(mc_buf_high + border_offset, b_w,
CONVERT_TO_SHORTPTR(dst), dst_buf_stride, subpel_x,
subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
} else {
build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w, x0,
y0, b_w, b_h, frame_width, frame_height);
inter_predictor(((uint8_t *)mc_buf_high) + border_offset, b_w, dst,
dst_buf_stride, subpel_x, subpel_y, sf, w, h, ref, kernel,
xs, ys);
}
} #else staticvoid extend_and_predict(TileWorkerData *twd, const uint8_t *buf_ptr1, int pre_buf_stride, int x0, int y0, int b_w, int b_h, int frame_width, int frame_height, int border_offset, uint8_t *const dst, int dst_buf_stride, int subpel_x, int subpel_y, const InterpKernel *kernel, conststruct scale_factors *sf, int w, int h, int ref, int xs, int ys) {
uint8_t *mc_buf = (uint8_t *)twd->extend_and_predict_buf; const uint8_t *buf_ptr;
// Calculate the top left corner of the best matching block in the // reference frame.
x0 += scaled_mv.col >> SUBPEL_BITS;
y0 += scaled_mv.row >> SUBPEL_BITS;
x0_16 += scaled_mv.col;
y0_16 += scaled_mv.row;
// Do border extension if there is motion or the // width/height is not a multiple of 8 pixels. if (is_scaled || scaled_mv.col || scaled_mv.row || (frame_width & 0x7) ||
(frame_height & 0x7)) { int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
// Get reference block bottom right horizontal coordinate. int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1; int x_pad = 0, y_pad = 0;
staticINLINEvoid dec_reset_skip_context(MACROBLOCKD *xd) { int i; for (i = 0; i < MAX_MB_PLANE; i++) { struct macroblockd_plane *const pd = &xd->plane[i];
memset(pd->above_context, 0, sizeof(ENTROPY_CONTEXT) * pd->n4_w);
memset(pd->left_context, 0, sizeof(ENTROPY_CONTEXT) * pd->n4_h);
}
}
// Records, per plane, the block's width/height in 4x4 units (and their log2
// values), adjusted for the plane's chroma subsampling.
static void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh, int bwl,
                         int bhl) {
  int plane;
  for (plane = 0; plane < MAX_MB_PLANE; plane++) {
    struct macroblockd_plane *const pd = &xd->plane[plane];
    pd->n4_w = (bw << 1) >> pd->subsampling_x;
    pd->n4_h = (bh << 1) >> pd->subsampling_y;
    pd->n4_wl = bwl - pd->subsampling_x;
    pd->n4_hl = bhl - pd->subsampling_y;
  }
}
static MODE_INFO *set_offsets_recon(VP9_COMMON *const cm, MACROBLOCKD *const xd, int mi_row, int mi_col, int bw, int bh, int bwl, int bhl) { constint offset = mi_row * cm->mi_stride + mi_col; const TileInfo *const tile = &xd->tile;
xd->mi = cm->mi_grid_visible + offset;
set_plane_n4(xd, bw, bh, bwl, bhl);
set_skip_context(xd, mi_row, mi_col);
// Distance of Mb to the various image edges. These are specified to 8th pel // as they are always compared to values that are in 1/8th pel units
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
static MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
BLOCK_SIZE bsize, int mi_row, int mi_col, int bw, int bh, int x_mis, int y_mis, int bwl, int bhl) { constint offset = mi_row * cm->mi_stride + mi_col; int x, y; const TileInfo *const tile = &xd->tile;
xd->mi = cm->mi_grid_visible + offset;
xd->mi[0] = &cm->mi[offset]; // TODO(slavarnway): Generate sb_type based on bwl and bhl, instead of // passing bsize from decode_partition().
xd->mi[0]->sb_type = bsize; for (y = 0; y < y_mis; ++y) for (x = !y; x < x_mis; ++x) {
xd->mi[y * cm->mi_stride + x] = xd->mi[0];
}
set_plane_n4(xd, bw, bh, bwl, bhl);
set_skip_context(xd, mi_row, mi_col);
// Distance of Mb to the various image edges. These are specified to 8th pel // as they are always compared to values that are in 1/8th pel units
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
if (!is_inter_block(mi)) {
predict_recon_intra(xd, mi, twd, parse_intra_block_row_mt);
} else { if (!mi->skip) {
tran_low_t *dqcoeff[MAX_MB_PLANE]; int *eob[MAX_MB_PLANE]; int plane; int eobtotal; // Based on eobtotal and bsize, this may be mi->skip may be set to true // In that case dqcoeff and eob need to be backed up and restored as // recon_block will not increment these pointers for skip cases for (plane = 0; plane < MAX_MB_PLANE; ++plane) { conststruct macroblockd_plane *const pd = &xd->plane[plane];
dqcoeff[plane] = pd->dqcoeff;
eob[plane] = pd->eob;
}
eobtotal = predict_recon_inter(xd, mi, twd, parse_inter_block_row_mt);
staticINLINEvoid dec_update_partition_context(TileWorkerData *twd, int mi_row, int mi_col, BLOCK_SIZE subsize, int bw) {
PARTITION_CONTEXT *const above_ctx = twd->xd.above_seg_context + mi_col;
PARTITION_CONTEXT *const left_ctx =
twd->xd.left_seg_context + (mi_row & MI_MASK);
// update the partition context at the end notes. set partition bits // of block sizes larger than the current one to be one, and partition // bits of smaller block sizes to be zero.
memset(above_ctx, partition_context_lookup[subsize].above, bw);
memset(left_ctx, partition_context_lookup[subsize].left, bw);
}
static PARTITION_TYPE read_partition(TileWorkerData *twd, int mi_row, int mi_col, int has_rows, int has_cols, int bsl) { constint ctx = dec_partition_plane_context(twd, mi_row, mi_col, bsl); const vpx_prob *const probs = twd->xd.partition_probs[ctx];
FRAME_COUNTS *counts = twd->xd.counts;
PARTITION_TYPE p;
vpx_reader *r = &twd->bit_reader;
if (has_rows && has_cols)
p = (PARTITION_TYPE)vpx_read_tree(r, vp9_partition_tree, probs); elseif (!has_rows && has_cols)
p = vpx_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ; elseif (has_rows && !has_cols)
p = vpx_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT; else
p = PARTITION_SPLIT;
// Initializes the bool decoder 'r' over 'read_size' bytes starting at
// 'data', raising a decoder error if that span is not fully contained in the
// buffer or the reader cannot be initialized.
static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
                                size_t read_size,
                                struct vpx_internal_error_info *error_info,
                                vpx_reader *r, vpx_decrypt_cb decrypt_cb,
                                void *decrypt_state) {
  // Validate the calculated partition length before handing it to the
  // reader.
  if (!read_is_valid(data, read_size, data_end)) {
    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile length");
  }
  if (vpx_reader_init(r, data, read_size, decrypt_cb, decrypt_state)) {
    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder %d", 1);
  }
}
// Reads coefficient probability updates for one transform size, but only
// when the per-size update flag is set in the bitstream.
static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
                                   vpx_reader *r) {
  int i, j, k, l, m;

  if (!vpx_read_bit(r)) return;

  for (i = 0; i < PLANE_TYPES; ++i)
    for (j = 0; j < REF_TYPES; ++j)
      for (k = 0; k < COEF_BANDS; ++k)
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}
// Parses the segmentation syntax from the uncompressed header: the enable
// flag, the optional segmentation-map tree/prediction probabilities, and the
// optional per-segment feature data. The bit-read order here must match the
// encoder exactly.
static void setup_segmentation(struct segmentation *seg,
                               struct vpx_read_bit_buffer *rb) {
  int i, j;

  seg->update_map = 0;
  seg->update_data = 0;

  seg->enabled = vpx_rb_read_bit(rb);
  if (!seg->enabled) return;

  // Segmentation map update
  seg->update_map = vpx_rb_read_bit(rb);
  if (seg->update_map) {
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      seg->tree_probs[i] =
          vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB;
    }

    seg->temporal_update = vpx_rb_read_bit(rb);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        seg->pred_probs[i] =
            vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB;
      }
    } else {
      // No temporal prediction: every context predicts "not predicted".
      for (i = 0; i < PREDICTION_PROBS; i++) seg->pred_probs[i] = MAX_PROB;
    }
  }

  // Segmentation data update
  seg->update_data = vpx_rb_read_bit(rb);
  if (seg->update_data) {
    seg->abs_delta = vpx_rb_read_bit(rb);
    vp9_clearall_segfeatures(seg);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
        const int feature_enabled = vpx_rb_read_bit(rb);
        if (feature_enabled) {
          vp9_enable_segfeature(seg, i, j);
          data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
          if (vp9_is_segfeature_signed(j))
            data = vpx_rb_read_bit(rb) ? -data : data;
        }
        vp9_set_segdata(seg, i, j, data);
      }
    }
  }
}
// Read in loop filter deltas applied at the MB level based on mode or ref // frame.
lf->mode_ref_delta_update = 0;
lf->mode_ref_delta_enabled = vpx_rb_read_bit(rb); if (lf->mode_ref_delta_enabled) {
lf->mode_ref_delta_update = vpx_rb_read_bit(rb); if (lf->mode_ref_delta_update) { int i;
for (i = 0; i < MAX_REF_LF_DELTAS; i++) if (vpx_rb_read_bit(rb))
lf->ref_deltas[i] = vpx_rb_read_signed_literal(rb, 6);
for (i = 0; i < MAX_MODE_LF_DELTAS; i++) if (vpx_rb_read_bit(rb))
lf->mode_deltas[i] = vpx_rb_read_signed_literal(rb, 6);
}
}
}
// Resizes the decoder's mode-info context buffers to 'width' x 'height'.
// Reallocation happens only when the new dimensions exceed the current
// allocation; the current frame's motion-vector buffer is grown when needed.
static void resize_context_buffers(VP9_COMMON *cm, int width, int height) {
#if CONFIG_SIZE_LIMIT
  if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Dimensions of %dx%d beyond allowed size of %dx%d.",
                       width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
#endif
  if (cm->width != width || cm->height != height) {
    const int new_mi_rows =
        ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
    const int new_mi_cols =
        ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;

    // Allocations in vp9_alloc_context_buffers() depend on individual
    // dimensions as well as the overall size.
    if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
      if (vp9_alloc_context_buffers(cm, width, height)) {
        // The cm->mi_* values have been cleared and any existing context
        // buffers have been freed. Clear cm->width and cm->height to be
        // consistent and to force a realloc next time.
        cm->width = 0;
        cm->height = 0;
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate context buffers");
      }
    } else {
      vp9_set_mb_mi(cm, width, height);
    }
    vp9_init_context_buffers(cm);
    cm->width = width;
    cm->height = height;
  }

  if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows ||
      cm->mi_cols > cm->cur_frame->mi_cols) {
    resize_mv_buffer(cm);
  }
}
// Check to make sure at least one of frames that this frame references // has valid dimensions. for (i = 0; i < REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i];
has_valid_ref_frame |=
(ref_frame->idx != INVALID_IDX &&
valid_ref_frame_size(ref_frame->buf->y_crop_width,
ref_frame->buf->y_crop_height, width, height));
} if (!has_valid_ref_frame)
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Referenced frame has invalid size"); for (i = 0; i < REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i]; if (ref_frame->idx == INVALID_IDX ||
!valid_ref_frame_img_fmt(ref_frame->buf->bit_depth,
ref_frame->buf->subsampling_x,
ref_frame->buf->subsampling_y, cm->bit_depth,
cm->subsampling_x, cm->subsampling_y))
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Referenced frame has incompatible color format");
}
// Reads the next tile returning its size and adjusting '*data' accordingly // based on 'is_last'. staticvoid get_tile_buffer(const uint8_t *const data_end, int is_last, struct vpx_internal_error_info *error_info, const uint8_t **data, vpx_decrypt_cb decrypt_cb, void *decrypt_state, TileBuffer *buf) {
size_t size;
if (!is_last) { if (!read_is_valid(*data, 4, data_end))
vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME, "Truncated packet or corrupt tile length");
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.