/* * Copyright (c) 2018 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
*/
// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit // integers, and return the high 16 bits of the intermediate integers. // (a * b) >> 16 // Note: Because this is done in 2 operations, a and b cannot both be UINT16_MIN staticINLINE int16x8_t vec_mulhi(int16x8_t a, int16x8_t b) { // madds does ((A * B) >> 15) + C, we need >> 16, so we perform an extra right // shift. return vec_sra(vec_madds(a, b, vec_zeros_s16), vec_ones_u16);
}
// Negate 16-bit integers in a when the corresponding signed 16-bit // integer in b is negative. staticINLINE int16x8_t vec_sign(int16x8_t a, int16x8_t b) { const int16x8_t mask = vec_sra(b, vec_shift_sign_s16); return vec_xor(vec_add(a, mask), mask);
}
// Compare packed 16-bit integers across a, and return the maximum value in // every element. Returns a vector containing the biggest value across vector a. staticINLINE int16x8_t vec_max_across(int16x8_t a) {
a = vec_max(a, vec_perm(a, a, vec_perm64));
a = vec_max(a, vec_perm(a, a, vec_perm32)); return vec_max(a, vec_perm(a, a, vec_perm16));
}
// First set of 8 coeff starts with DC + 7 AC
qcoeff0 = vec_mulhi(vec_vaddshs(vec_abs(coeff0), round), quant);
zero_coeff0 = vec_cmpeq(qcoeff0, vec_zeros_s16);
qcoeff0 = vec_sign(qcoeff0, coeff0);
vec_vsx_st(qcoeff0, 0, qcoeff_ptr);
// We quantize 16 coeff up front (enough for a 4x4) and process 24 coeff per // loop iteration. // for 8x8: 16 + 2 x 24 = 64 // for 16x16: 16 + 10 x 24 = 256 if (n_coeffs > 16) {
int16x8_t coeff2, qcoeff2, dqcoeff2, eob2, scan2;
bool16x8_t zero_coeff2;
int index = 16; int off0 = 32; int off1 = 48; int off2 = 64;
// Sets the value of a 32-bit integers to 1 when the corresponding value in a is // negative. staticINLINE int32x4_t vec_is_neg(int32x4_t a) { return vec_sr(a, vec_shift_sign_s32);
}
// DeQuantization function used for 32x32 blocks. Quantized coeff of 32x32 // blocks are twice as big as for other block sizes. As such, using // vec_mladd results in overflow. staticINLINE int16x8_t dequantize_coeff_32(int16x8_t qcoeff,
int16x8_t dequant) {
int32x4_t dqcoeffe = vec_mule(qcoeff, dequant);
int32x4_t dqcoeffo = vec_mulo(qcoeff, dequant); // Add 1 if negative to round towards zero because the C uses division.
dqcoeffe = vec_add(dqcoeffe, vec_is_neg(dqcoeffe));
dqcoeffo = vec_add(dqcoeffo, vec_is_neg(dqcoeffo));
dqcoeffe = vec_sra(dqcoeffe, vec_ones_u32);
dqcoeffo = vec_sra(dqcoeffo, vec_ones_u32); return (int16x8_t)vec_perm(dqcoeffe, dqcoeffo, vec_perm_odd_even_pack);
}
// Forward quantization for 32x32 blocks (body continues beyond this chunk).
void vp9_quantize_fp_32x32_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                               const int16_t *round_ptr,
                               const int16_t *quant_ptr, tran_low_t *qcoeff_ptr,
                               tran_low_t *dqcoeff_ptr,
                               const int16_t *dequant_ptr, uint16_t *eob_ptr,
                               const int16_t *scan, const int16_t *iscan) {
  // In stage 1, we quantize 16 coeffs (DC + 15 AC)
  // In stage 2, we loop 42 times and quantize 24 coeffs per iteration
  // (32 * 32 - 16) / 24 = 42
  int num_itr = 42;
  // Offsets are in bytes, 16 coeffs = 32 bytes
  int off0 = 32;
  int off1 = 48;
  int off2 = 64;
/* NOTE(review): The German text below is a website disclaimer that is not part
 * of this source file — it was almost certainly introduced by a copy/paste or
 * scraping error and should be removed. It is fenced in a comment here so the
 * file remains compilable. Translation: "The information on this website has
 * been carefully compiled to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax highlighting and the measurement are
 * still experimental."
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */