/* * Copyright (c) 2010 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
*/
/* NOTE(review): fragment of the coefficient token-packing loop; the function
 * header and the loop over tokens lie outside this chunk.  A bad merge has
 * fused keywords ("constint", "conststruct", "constunsignedchar") and pulled
 * code up into trailing "//" comments (e.g. the "do {" swallowed at the end
 * of the "number of bits" comment below), so this fragment does not compile
 * as-is.  Code is left byte-identical; only comments were added. */
{ constint t = p->token; const vpx_prob *const context_tree = p->context_tree;
/* ZERO/EOB/EOSB tokens are expected to be handled before reaching here. */
assert(t != ZERO_TOKEN);
assert(t != EOB_TOKEN);
assert(t != EOSB_TOKEN);
vpx_write(w, 1, context_tree[1]); if (t == ONE_TOKEN) {
vpx_write(w, 0, context_tree[2]);
/* Sign bit for the +/-1 coefficient. */
vpx_write_bit(w, p->extra & 1);
} else { // t >= TWO_TOKEN && t < EOB_TOKEN conststruct vp9_token *const a = &vp9_coef_encodings[t]; int v = a->value; int n = a->len; constint e = p->extra;
vpx_write(w, 1, context_tree[2]);
/* Remaining token bits come from the constrained tree driven by the
 * Pareto table indexed off the pivot-node probability. */
vp9_write_tree(w, vp9_coef_con_tree,
vp9_pareto8_full[context_tree[PIVOT_NODE] - 1], v,
n - UNCONSTRAINED_NODES, 0); if (t >= CATEGORY1_TOKEN) { const vp9_extra_bit *const b = &extra_bits[t]; constunsignedchar *pb = b->prob;
v = e >> 1;
n = b->len; // number of bits in v, assumed nonzero do { constint bb = (v >> --n) & 1;
/* Category extra bits are written MSB-first, one probability per bit. */
vpx_write(w, bb, *pb++);
} while (n);
}
/* Sign bit of the extra value. */
vpx_write_bit(w, e & 1);
}
}
}
/* NOTE(review): presumably advances the caller's token pointer past the
 * run; the cast through uintptr_t looks like merge damage — confirm
 * against upstream (expected: *tp = p + (p->token == EOSB_TOKEN);). */
*tp = (TOKENEXTRA *)(uintptr_t)p + (p->token == EOSB_TOKEN);
}
// Writes the segment id of a block with the segmentation tree probabilities.
// The id is coded only when segmentation is enabled and the map is being
// updated; otherwise nothing is written and the decoder infers the id.
// Fix: a bad merge had fused "static void" and "const struct" into single
// tokens, which does not compile.
static void write_segment_id(vpx_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}
/* NOTE(review): the signature of write_ref_frames was swallowed into the
 * "//" comment on the next line by a bad merge, and lines near the end
 * splice in an unrelated fragment (it references 'seg', 'bsize', 'mode' and
 * 'inter_probs', none of which are declared here); braces do not balance
 * within the visible range.  Code left byte-identical; comments only added. */
// This function encodes the reference frame staticvoid write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *const xd,
vpx_writer *w) { const MODE_INFO *const mi = xd->mi[0]; constint is_compound = has_second_ref(mi); constint segment_id = mi->segment_id;
// If segment level coding of this signal is disabled... // or the segment allows multiple reference frame options if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
/* When the segment pins the reference frame, nothing is coded; the
 * asserts check the mode info agrees with the segment data. */
assert(!is_compound);
assert(mi->ref_frame[0] ==
get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
} else { // does the feature use compound prediction or not // (if not specified at the frame/segment level) if (cm->reference_mode == REFERENCE_MODE_SELECT) {
vpx_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd));
} else {
assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
}
/* NOTE(review): the lines below belong to a different function (inter mode
 * coding) and were spliced here by the merge. */
// If segment skip is not enabled code the mode. if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { if (bsize >= BLOCK_8X8) {
write_inter_mode(w, mode, inter_probs);
}
}
/* NOTE(review): fragment of the coefficient probability-update writer
 * (switch over cpi->sf.use_fast_coef_updates); the function header and the
 * declarations of i, j, k, l, t, bc, upd, stepsize, entropy_nodes_update,
 * new/old_coef_probs and frame_branch_ct lie outside this chunk.  Several
 * "//" comments have swallowed the code that followed them on the same
 * source line (e.g. the "calc probs..." comments hide the inner
 * "for (t ...)" loops), so this does not compile as-is.  Code left
 * byte-identical; comments only added. */
switch (cpi->sf.use_fast_coef_updates) { case TWO_LOOP: { /* dry run to see if there is any update at all needed */
int64_t savings = 0; int update[2] = { 0, 0 }; for (i = 0; i < PLANE_TYPES; ++i) { for (j = 0; j < REF_TYPES; ++j) { for (k = 0; k < COEF_BANDS; ++k) { for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { for (t = 0; t < entropy_nodes_update; ++t) {
vpx_prob newp = new_coef_probs[i][j][k][l][t]; const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
/* First pass: only measure the bit savings; nothing is written yet. */
int64_t s; int u = 0; if (t == PIVOT_NODE)
s = vp9_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0], oldp, &newp, upd,
stepsize); else
s = vp9_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], oldp, &newp, upd); if (s > 0 && newp != oldp) u = 1; if (u)
savings += s - (int)(vp9_cost_zero(upd)); else
savings -= (int)(vp9_cost_zero(upd));
update[u]++;
}
}
}
}
}
// printf("Update %d %d, savings %d\n", update[0], update[1], savings); /* Is coef updated at all */ if (update[1] == 0 || savings < 0) {
/* No net win: signal "no update" with a single bit and stop. */
vpx_write_bit(bc, 0); return;
}
/* Second pass: updates are worthwhile, so actually write them out. */
vpx_write_bit(bc, 1); for (i = 0; i < PLANE_TYPES; ++i) { for (j = 0; j < REF_TYPES; ++j) { for (k = 0; k < COEF_BANDS; ++k) { for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { // calc probs and branch cts for this frame only for (t = 0; t < entropy_nodes_update; ++t) {
vpx_prob newp = new_coef_probs[i][j][k][l][t];
vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
int64_t s; int u = 0; if (t == PIVOT_NODE)
s = vp9_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0], *oldp, &newp, upd,
stepsize); else
s = vp9_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd); if (s > 0 && newp != *oldp) u = 1;
vpx_write(bc, u, upd); if (u) { /* send/use new probability */
vp9_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
}
}
}
} return;
}
/* Single-loop reduced mode: decide and write per node as we go, with the
 * leading no-update run-length coalesced before the first real update. */
default: { int updates = 0; int noupdates_before_first = 0;
assert(cpi->sf.use_fast_coef_updates == ONE_LOOP_REDUCED); for (i = 0; i < PLANE_TYPES; ++i) { for (j = 0; j < REF_TYPES; ++j) { for (k = 0; k < COEF_BANDS; ++k) { for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { // calc probs and branch cts for this frame only for (t = 0; t < entropy_nodes_update; ++t) {
vpx_prob newp = new_coef_probs[i][j][k][l][t];
vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
int64_t s; int u = 0;
if (t == PIVOT_NODE) {
s = vp9_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0], *oldp, &newp, upd,
stepsize);
} else {
s = vp9_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
}
if (s > 0 && newp != *oldp) u = 1;
updates += u; if (u == 0 && updates == 0) {
/* Still before the first update: just count the skipped nodes. */
noupdates_before_first++; continue;
} if (u == 1 && updates == 1) { int v; // first update
/* First update: emit the global "updates exist" bit, then replay the
 * deferred no-update flags for the nodes skipped so far. */
vpx_write_bit(bc, 1); for (v = 0; v < noupdates_before_first; ++v)
vpx_write(bc, 0, upd);
}
vpx_write(bc, u, upd); if (u) { /* send/use new probability */
vp9_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
}
}
}
} if (updates == 0) {
vpx_write_bit(bc, 0); // no updates
} return;
}
}
}
/* NOTE(review): truncated encode_loopfilter — the mode_deltas half and the
 * closing braces are missing before the next function begins, and the
 * "staticvoid"/"constint" keyword fusions will not compile.  Code left
 * byte-identical; comments only added. */
staticvoid encode_loopfilter(struct loopfilter *lf, struct vpx_write_bit_buffer *wb) { int i;
// Encode the loop filter level and type
vpx_wb_write_literal(wb, lf->filter_level, 6);
vpx_wb_write_literal(wb, lf->sharpness_level, 3);
// Write out loop filter deltas applied at the MB level based on mode or // ref frame (if they are enabled).
vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled);
if (lf->mode_ref_delta_enabled) {
vpx_wb_write_bit(wb, lf->mode_ref_delta_update); if (lf->mode_ref_delta_update) { for (i = 0; i < MAX_REF_LF_DELTAS; i++) { constint delta = lf->ref_deltas[i]; constint changed = delta != lf->last_ref_deltas[i];
/* A delta is only re-sent when it changed; 6-bit magnitude + sign bit. */
vpx_wb_write_bit(wb, changed); if (changed) {
lf->last_ref_deltas[i] = delta;
vpx_wb_write_literal(wb, abs(delta) & 0x3F, 6);
vpx_wb_write_bit(wb, delta < 0);
}
}
/* NOTE(review): truncated encode_segmentation — the function is cut off
 * mid-way through the per-segment feature-data loop (the next function
 * begins immediately after), and the "staticvoid"/"conststruct"/"constint"
 * keyword fusions will not compile.  Code left byte-identical. */
staticvoid encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd, struct vpx_write_bit_buffer *wb) { int i, j;
conststruct segmentation *seg = &cm->seg;
vpx_wb_write_bit(wb, seg->enabled); if (!seg->enabled) return;
// Segmentation map
vpx_wb_write_bit(wb, seg->update_map); if (seg->update_map) { // Select the coding strategy (temporal or spatial)
vp9_choose_segmap_coding_method(cm, xd); // Write out probabilities used to decode unpredicted macro-block segments for (i = 0; i < SEG_TREE_PROBS; i++) { constint prob = seg->tree_probs[i]; constint update = prob != MAX_PROB;
/* Each tree probability is sent only when it differs from MAX_PROB. */
vpx_wb_write_bit(wb, update); if (update) vpx_wb_write_literal(wb, prob, 8);
}
// Write out the chosen coding method.
vpx_wb_write_bit(wb, seg->temporal_update); if (seg->temporal_update) { for (i = 0; i < PREDICTION_PROBS; i++) { constint prob = seg->pred_probs[i]; constint update = prob != MAX_PROB;
vpx_wb_write_bit(wb, update); if (update) vpx_wb_write_literal(wb, prob, 8);
}
}
}
// Segmentation data
vpx_wb_write_bit(wb, seg->update_data); if (seg->update_data) {
vpx_wb_write_bit(wb, seg->abs_delta);
for (i = 0; i < MAX_SEGMENTS; i++) { for (j = 0; j < SEG_LVL_MAX; j++) { constint active = segfeature_active(seg, i, j);
vpx_wb_write_bit(wb, active); if (active) { constint data = get_segdata(seg, i, j); constint data_max = vp9_seg_feature_data_max(j);
// If the frame header signalled SWITCHABLE interpolation but the collected
// counts show that only a single filter was ever chosen, lock the frame-level
// filter to that one so per-block filter syntax can be skipped.
// Fix: a bad merge had fused "static void" into one token (compile error)
// and collapsed several statements per line; reformatted with no behavior
// change.
static void fix_interp_filter(VP9_COMMON *cm, FRAME_COUNTS *counts) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += counts->switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->interp_filter = i;
          break;
        }
      }
    }
  }
}
/* NOTE(review): this span splices the start of write_tile_info together with
 * the body of a reference-refresh-mask helper (it uses cpi, update_ref_idx
 * and gf_group, none of which are declared here); braces do not balance in
 * the visible range and "staticvoid" will not compile.  Code left
 * byte-identical; comments only added. */
staticvoid write_tile_info(const VP9_COMMON *const cm, struct vpx_write_bit_buffer *wb) { int min_log2_tile_cols, max_log2_tile_cols, ones;
vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
/* NOTE(review): from here on the code belongs to a different function. */
if (update_ref_idx != INVALID_IDX) { return (1 << update_ref_idx);
} else { return 0;
}
} if (vp9_preserve_existing_gf(cpi)) { // We have decided to preserve the previously existing golden frame as our // new ARF frame. However, in the short term we leave it in the GF slot and, // if we're updating the GF with the current decoded frame, we save it // instead to the ARF slot. // Later, in the function vp9_encoder.c:vp9_update_reference_frames() we // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it // there so that it can be done outside of the recode loop. // Note: This is highly specific to the use of ARF as a forward reference, // and this needs to be generalized as other uses are implemented // (like RTC/temporal scalability). return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
(cpi->refresh_golden_frame << cpi->alt_fb_idx);
} else { int arf_idx = cpi->alt_fb_idx;
GF_GROUP *const gf_group = &cpi->twopass.gf_group;
/* Multi-layer ARF: pick the first free slot that is not LAST/GOLDEN/ALT
 * and not already on the ARF index stack. */
if (cpi->multi_layer_arf) { for (arf_idx = 0; arf_idx < REF_FRAMES; ++arf_idx) { if (arf_idx != cpi->alt_fb_idx && arf_idx != cpi->lst_fb_idx &&
arf_idx != cpi->gld_fb_idx) { int idx; for (idx = 0; idx < gf_group->stack_size; ++idx) if (arf_idx == gf_group->arf_index_stack[idx]) break; if (idx == gf_group->stack_size) break;
}
}
}
cpi->twopass.gf_group.top_arf_idx = arf_idx;
/* NOTE(review): fragment of the multi-threaded tile encoder spliced with the
 * realtime dispatch check from its caller; tile_col, tile_cols, num_workers,
 * total_size, data_ptr, data_size, winterface and error are all declared
 * outside this chunk, and braces do not balance in the visible range.
 * Code left byte-identical; comments only added. */
while (tile_col < tile_cols) { int i, j; for (i = 0; i < num_workers && tile_col < tile_cols; ++i) {
VPxWorker *const worker = &cpi->workers[i];
VP9BitstreamWorkerData *const data = &cpi->vp9_bitstream_worker_data[i];
// First thread can directly write into the output buffer. if (i == 0) { // If this worker happens to be for the last tile, then do not offset it // by 4 for the tile size. const size_t offset = total_size + (tile_col == tile_cols - 1 ? 0 : 4); if (data_size < offset) {
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "encode_tiles_mt: output buffer full");
}
data->dest = data_ptr + offset;
data->dest_size = data_size - offset;
}
worker->data1 = cpi;
worker->data2 = data;
worker->hook = encode_tile_worker;
worker->had_error = 0;
/* The last worker of the batch runs synchronously on this thread. */
if (i < num_workers - 1) {
winterface->launch(worker);
} else {
winterface->execute(worker);
}
++tile_col;
} for (j = 0; j < i; ++j) {
VPxWorker *const worker = &cpi->workers[j];
VP9BitstreamWorkerData *const data =
(VP9BitstreamWorkerData *)worker->data2;
uint32_t tile_size; int k;
/* A sync failure marks the whole batch as failed but keeps draining the
 * remaining workers. */
if (!winterface->sync(worker)) {
error = 1; continue;
}
// Encoding tiles in parallel is done only for realtime mode now. In other // modes the speed up is insignificant and requires further testing to ensure // that it does not make the overall process worse in any case. if (cpi->oxcf.mode == REALTIME && cpi->num_workers > 1 && tile_rows == 1 &&
tile_cols > 1) { return encode_tiles_mt(cpi, data_ptr, data_size);
}
/* NOTE(review): spliced fragments of the serial tile loop, the
 * show-existing-frame signalling and the compressed-header probability
 * updates; the enclosing functions and the declarations of wb, header_bc,
 * fc, counts, use_compound_pred, data and dest lie outside this chunk, and
 * braces do not balance in the visible range.  Code left byte-identical;
 * comments only added. */
for (tile_row = 0; tile_row < tile_rows; tile_row++) { for (tile_col = 0; tile_col < tile_cols; tile_col++) { int tile_idx = tile_row * tile_cols + tile_col;
// If to use show existing frame.
vpx_wb_write_bit(wb, cm->show_existing_frame); if (cm->show_existing_frame) {
/* Showing an existing frame: only the 3-bit buffer index is coded. */
vpx_wb_write_literal(wb, cpi->alt_fb_idx, 3); return;
}
/* Conditional probability updates for inter modes, interp filters and
 * intra/inter, compound-mode and reference signalling contexts. */
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
prob_diff_update(vp9_inter_mode_tree, cm->fc->inter_mode_probs[i],
counts->inter_mode[i], INTER_MODES, &header_bc);
if (cm->interp_filter == SWITCHABLE)
update_switchable_interp_probs(cm, &header_bc, counts);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
counts->intra_inter[i]);
vpx_write_bit(&header_bc, use_compound_pred); if (use_compound_pred) {
vpx_write_bit(&header_bc, use_hybrid_pred); if (use_hybrid_pred) for (i = 0; i < COMP_INTER_CONTEXTS; i++)
vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
counts->comp_inter[i]);
}
}
if (cm->reference_mode != COMPOUND_REFERENCE) { for (i = 0; i < REF_CONTEXTS; i++) {
vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
counts->single_ref[i][0]);
vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
counts->single_ref[i][1]);
}
}
if (cm->reference_mode != SINGLE_REFERENCE) for (i = 0; i < REF_CONTEXTS; i++)
vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
counts->comp_ref[i]);
for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
prob_diff_update(vp9_intra_mode_tree, cm->fc->y_mode_prob[i],
counts->y_mode[i], INTRA_MODES, &header_bc);
for (i = 0; i < PARTITION_CONTEXTS; ++i)
prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
counts->partition[i], PARTITION_TYPES, &header_bc);
// Skip the rest coding process if use show existing frame. if (cm->show_existing_frame) {
uncompressed_hdr_size = vpx_wb_bytes_written(&wb);
data += uncompressed_hdr_size;
*size = data - dest; return;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.