/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
typedef int64_t (*sse_extractor_type)(const YV12_BUFFER_CONFIG *a,
                                      const YV12_BUFFER_CONFIG *b);
typedef int64_t (*sse_part_extractor_type)(const YV12_BUFFER_CONFIG *a,
                                           const YV12_BUFFER_CONFIG *b,
                                           int hstart, int width, int vstart,
                                           int height);
typedef uint64_t (*var_part_extractor_type)(const YV12_BUFFER_CONFIG *a,
                                            int hstart, int width, int vstart,
                                            int height);
typedef struct {
  const AV1_COMMON *cm;
  const MACROBLOCK *x;
  int plane;
  int plane_w;
  int plane_h;
  RestUnitSearchInfo *rusi;

  // Speed features
  const LOOP_FILTER_SPEED_FEATURES *lpf_sf;

  uint8_t *dgd_buffer;
  int dgd_stride;
  const uint8_t *src_buffer;
  int src_stride;
  // SSE values for each restoration mode for the current RU
  // These are saved by each search function for use in search_switchable()
int64_t sse[RESTORE_SWITCHABLE_TYPES];
  // This flag will be set based on the speed feature
  // 'prune_sgr_based_on_wiener'. 0 implies no pruning and 1 implies pruning.
uint8_t skip_sgr_eval;
  // Total rate and distortion so far for each restoration type
  // These are initialised by reset_rsc in search_rest_type
int64_t total_sse[RESTORE_TYPES];
int64_t total_bits[RESTORE_TYPES];
  // Reference parameters for delta-coding
  //
  // For each restoration type, we need to store the latest parameter set which
  // has been used, so that we can properly cost up the next parameter set.
  // Note that we have two sets of these - one for the single-restoration-mode
  // search (ie, frame_restoration_type = RESTORE_WIENER or RESTORE_SGRPROJ)
  // and one for the switchable mode. This is because these two cases can lead
  // to different sets of parameters being signaled, but we don't know which
  // we will pick for sure until the end of the search process.
WienerInfo ref_wiener;
SgrprojInfo ref_sgrproj;
WienerInfo switchable_ref_wiener;
SgrprojInfo switchable_ref_sgrproj;
  // Buffers used to hold dgd-avg and src-avg data respectively during SIMD
  // call of Wiener filter.
int16_t *dgd_avg;
int16_t *src_avg;
} RestSearchCtxt;
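// Illustrative note (not from the source): the two reference sets above
// matter because Wiener/sgrproj parameters are delta-coded against the
// previously signaled unit. For example, if RU0 and RU1 both pick
// RESTORE_WIENER, then in the wiener-only search RU1 is costed against RU0's
// taps via ref_wiener, while in the switchable search RU0 might instead have
// picked RESTORE_SGRPROJ, so RU1 must be costed against switchable_ref_wiener,
// which may still hold the default taps. Keeping both running references lets
// each search produce a consistent bit cost.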
  const YV12_BUFFER_CONFIG *fts = &cm->cur_frame->buf;
  // TODO(yunqing): For now, only use optimized LR filter in decoder. Can be
  // also used in encoder.
  const int optimized_lr = 0;
int64_t av1_lowbd_pixel_proj_error_c(const uint8_t *src8, int width,
                                     int height, int src_stride,
                                     const uint8_t *dat8, int dat_stride,
                                     int32_t *flt0, int flt0_stride,
                                     int32_t *flt1, int flt1_stride, int xq[2],
                                     const sgr_params_type *params) {
  int i, j;
  const uint8_t *src = src8;
  const uint8_t *dat = dat8;
  int64_t err = 0;
  if (params->r[0] > 0 && params->r[1] > 0) {
    for (i = 0; i < height; ++i) {
      for (j = 0; j < width; ++j) {
        assert(flt1[j] < (1 << 15) && flt1[j] > -(1 << 15));
        assert(flt0[j] < (1 << 15) && flt0[j] > -(1 << 15));
        const int32_t u = (int32_t)(dat[j] << SGRPROJ_RST_BITS);
        int32_t v = u << SGRPROJ_PRJ_BITS;
        v += xq[0] * (flt0[j] - u) + xq[1] * (flt1[j] - u);
        const int32_t e =
ROUND_POWER_OF_TWO(v, SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS) - src[j];
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
flt0 += flt0_stride;
flt1 += flt1_stride;
}
  } else if (params->r[0] > 0) {
    for (i = 0; i < height; ++i) {
      for (j = 0; j < width; ++j) {
        assert(flt0[j] < (1 << 15) && flt0[j] > -(1 << 15));
        const int32_t u = (int32_t)(dat[j] << SGRPROJ_RST_BITS);
        int32_t v = u << SGRPROJ_PRJ_BITS;
        v += xq[0] * (flt0[j] - u);
        const int32_t e =
ROUND_POWER_OF_TWO(v, SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS) - src[j];
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
flt0 += flt0_stride;
}
  } else if (params->r[1] > 0) {
    for (i = 0; i < height; ++i) {
      for (j = 0; j < width; ++j) {
        assert(flt1[j] < (1 << 15) && flt1[j] > -(1 << 15));
        const int32_t u = (int32_t)(dat[j] << SGRPROJ_RST_BITS);
        int32_t v = u << SGRPROJ_PRJ_BITS;
        v += xq[1] * (flt1[j] - u);
        const int32_t e =
ROUND_POWER_OF_TWO(v, SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS) - src[j];
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
flt1 += flt1_stride;
}
  } else {
    for (i = 0; i < height; ++i) {
      for (j = 0; j < width; ++j) {
        const int32_t e = (int32_t)(dat[j]) - src[j];
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
}
}
return err;
}
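// Worked example (illustrative numbers, assuming the usual SGRPROJ_RST_BITS=4
// and SGRPROJ_PRJ_BITS=7): for dat[j]=100, u = 100 << 4 = 1600 and v starts
// at 1600 << 7 = 204800. With flt0[j]=1700, flt1[j]=1500, xq[0]=40, xq[1]=20:
// v += 40*100 + 20*(-100) = 206800, and ROUND_POWER_OF_TWO(206800, 11) = 101,
// i.e. the projection nudged the pixel from 100 to 101 before the error
// against src[j] is taken.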
#if CONFIG_AV1_HIGHBITDEPTH
int64_t av1_highbd_pixel_proj_error_c(const uint8_t *src8, int width,
                                      int height, int src_stride,
                                      const uint8_t *dat8, int dat_stride,
                                      int32_t *flt0, int flt0_stride,
                                      int32_t *flt1, int flt1_stride,
                                      int xq[2],
                                      const sgr_params_type *params) {
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
  int i, j;
  int64_t err = 0;
  const int32_t half = 1 << (SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS - 1);
  if (params->r[0] > 0 && params->r[1] > 0) {
    int xq0 = xq[0];
    int xq1 = xq[1];
    for (i = 0; i < height; ++i) {
      for (j = 0; j < width; ++j) {
        const int32_t d = dat[j];
        const int32_t s = src[j];
        const int32_t u = (int32_t)(d << SGRPROJ_RST_BITS);
int32_t v0 = flt0[j] - u;
int32_t v1 = flt1[j] - u;
int32_t v = half;
v += xq0 * v0;
        v += xq1 * v1;
        const int32_t e = (v >> (SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS)) + d - s;
err += ((int64_t)e * e);
}
dat += dat_stride;
flt0 += flt0_stride;
flt1 += flt1_stride;
src += src_stride;
}
  } else if (params->r[0] > 0 || params->r[1] > 0) {
    int exq;
    int32_t *flt;
    int flt_stride;
    if (params->r[0] > 0) {
exq = xq[0];
flt = flt0;
flt_stride = flt0_stride;
} else {
exq = xq[1];
flt = flt1;
flt_stride = flt1_stride;
    }
    for (i = 0; i < height; ++i) {
      for (j = 0; j < width; ++j) {
        const int32_t d = dat[j];
        const int32_t s = src[j];
        const int32_t u = (int32_t)(d << SGRPROJ_RST_BITS);
int32_t v = half;
        v += exq * (flt[j] - u);
        const int32_t e = (v >> (SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS)) + d - s;
err += ((int64_t)e * e);
}
dat += dat_stride;
flt += flt_stride;
src += src_stride;
}
  } else {
    for (i = 0; i < height; ++i) {
      for (j = 0; j < width; ++j) {
        const int32_t d = dat[j];
        const int32_t s = src[j];
        const int32_t e = d - s;
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
}
  }
  return err;
}
#endif  // CONFIG_AV1_HIGHBITDEPTH
static int64_t get_pixel_proj_error(const uint8_t *src8, int width, int height,
                                    int src_stride, const uint8_t *dat8,
                                    int dat_stride, int use_highbitdepth,
                                    int32_t *flt0, int flt0_stride,
                                    int32_t *flt1, int flt1_stride, int *xqd,
                                    const sgr_params_type *params) {
  int xq[2];
  av1_decode_xq(xqd, xq, params);

// The function calls 3 subfunctions for the following cases :
// 1) When params->r[0] > 0 and params->r[1] > 0. In this case all elements
//    of C and H need to be computed.
// 2) When only params->r[0] > 0. In this case only H[0][0] and C[0] are
//    non-zero and need to be computed.
// 3) When only params->r[1] > 0. In this case only H[1][1] and C[1] are
//    non-zero and need to be computed.
void av1_calc_proj_params_c(const uint8_t *src8, int width, int height,
                            int src_stride, const uint8_t *dat8, int dat_stride,
                            int32_t *flt0, int flt0_stride, int32_t *flt1,
                            int flt1_stride, int64_t H[2][2], int64_t C[2],
                            const sgr_params_type *params) {
  if ((params->r[0] > 0) && (params->r[1] > 0)) {
calc_proj_params_r0_r1_c(src8, width, height, src_stride, dat8, dat_stride,
flt0, flt0_stride, flt1, flt1_stride, H, C);
  } else if (params->r[0] > 0) {
calc_proj_params_r0_c(src8, width, height, src_stride, dat8, dat_stride,
flt0, flt0_stride, H, C);
  } else if (params->r[1] > 0) {
calc_proj_params_r1_c(src8, width, height, src_stride, dat8, dat_stride,
flt1, flt1_stride, H, C);
}
}
  if (params->r[0] == 0) {
    // H matrix is now only the scalar H[1][1]
    // C vector is now only the scalar C[1]
    const int64_t Det = H[1][1];
    if (Det == 0) return;  // ill-posed, return default values
xq[0] = 0;
xq[1] = (int)signed_rounded_divide(C[1] * (1 << SGRPROJ_PRJ_BITS), Det);
  } else if (params->r[1] == 0) {
    // H matrix is now only the scalar H[0][0]
    // C vector is now only the scalar C[0]
    const int64_t Det = H[0][0];
    if (Det == 0) return;  // ill-posed, return default values
xq[0] = (int)signed_rounded_divide(C[0] * (1 << SGRPROJ_PRJ_BITS), Det);
xq[1] = 0;
  } else {
    const int64_t Det = H[0][0] * H[1][1] - H[0][1] * H[1][0];
    if (Det == 0) return;  // ill-posed, return default values
    // If scaling up dividend would overflow, instead scale down the divisor
    const int64_t div1 = H[1][1] * C[0] - H[0][1] * C[1];
    if ((div1 > 0 && INT64_MAX / (1 << SGRPROJ_PRJ_BITS) < div1) ||
        (div1 < 0 && INT64_MIN / (1 << SGRPROJ_PRJ_BITS) > div1))
      xq[0] = (int)signed_rounded_divide(div1, Det / (1 << SGRPROJ_PRJ_BITS));
    else
xq[0] = (int)signed_rounded_divide(div1 * (1 << SGRPROJ_PRJ_BITS), Det);
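    // Illustrative note (not from the source): with SGRPROJ_PRJ_BITS = 7,
    // div1 * 128 overflows int64_t once |div1| exceeds roughly 2^56, so the
    // guard above divides Det by 128 instead. Both branches compute
    // approximately div1 * 128 / Det; the second keeps full precision in the
    // common case, the first merely avoids undefined behaviour on overflow.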
// Apply the self-guided filter across an entire restoration unit.
static inline void apply_sgr(int sgr_params_idx, const uint8_t *dat8,
                             int width, int height, int dat_stride,
                             int use_highbd, int bit_depth, int pu_width,
                             int pu_height, int32_t *flt0, int32_t *flt1,
                             int flt_stride,
                             struct aom_internal_error_info *error_info) {
  for (int i = 0; i < height; i += pu_height) {
    const int h = AOMMIN(pu_height, height - i);
int32_t *flt0_row = flt0 + i * flt_stride;
    int32_t *flt1_row = flt1 + i * flt_stride;
    const uint8_t *dat8_row = dat8 + i * dat_stride;
    // Iterate over the stripe in blocks of width pu_width
    for (int j = 0; j < width; j += pu_width) {
      const int w = AOMMIN(pu_width, width - j);
      if (av1_selfguided_restoration(
dat8_row + j, w, h, dat_stride, flt0_row + j, flt1_row + j,
flt_stride, sgr_params_idx, bit_depth, use_highbd) != 0) {
        aom_internal_error(
            error_info, AOM_CODEC_MEM_ERROR,
            "Error allocating buffer in av1_selfguided_restoration");
}
}
}
}
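// Illustrative note (not from the source): the double loop tiles the
// restoration unit into processing units of at most pu_width x pu_height
// pixels (e.g. a 128x128 unit with 32x32 processing units is filtered as a
// 4x4 grid of tiles), with edge tiles clamped by AOMMIN so partial tiles at
// the right/bottom borders are still covered exactly once.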
#if DEBUG_LR_COSTING
  // Store ref params for later checking
  lr_ref_params[RESTORE_SGRPROJ][rsc->plane][rest_unit_idx].sgrproj_info =
      rsc->ref_sgrproj;
#endif  // DEBUG_LR_COSTING
static void acc_stat_one_line(const uint8_t *dgd, const uint8_t *src,
                              int dgd_stride, int h_start, int h_end,
                              uint8_t avg, const int wiener_halfwin,
                              const int wiener_win2, int32_t *M_int32,
                              int32_t *H_int32, int count) {
  int j, k, l;
  int16_t Y[WIENER_WIN2];

  for (j = h_start; j < h_end; j++) {
    const int16_t X = (int16_t)src[j] - (int16_t)avg;
    int idx = 0;
    for (k = -wiener_halfwin; k <= wiener_halfwin; k++) {
      for (l = -wiener_halfwin; l <= wiener_halfwin; l++) {
Y[idx] =
(int16_t)dgd[(count + l) * dgd_stride + (j + k)] - (int16_t)avg;
idx++;
}
}
    assert(idx == wiener_win2);
    for (k = 0; k < wiener_win2; ++k) {
      M_int32[k] += (int32_t)Y[k] * X;
      for (l = k; l < wiener_win2; ++l) {
        // H is a symmetric matrix, so we only need to fill out the upper
        // triangle here. We can copy it down to the lower triangle outside
        // the (i, j) loops.
H_int32[k * wiener_win2 + l] += (int32_t)Y[k] * Y[l];
}
}
}
}
void av1_compute_stats_c(int wiener_win, const uint8_t *dgd, const uint8_t *src,
                         int16_t *dgd_avg, int16_t *src_avg, int h_start,
                         int h_end, int v_start, int v_end, int dgd_stride,
                         int src_stride, int64_t *M, int64_t *H,
                         int use_downsampled_wiener_stats) {
(void)dgd_avg;
  (void)src_avg;
  int i, k, l;
  const int wiener_win2 = wiener_win * wiener_win;
  const int wiener_halfwin = (wiener_win >> 1);
uint8_t avg = find_average(dgd, h_start, h_end, v_start, v_end, dgd_stride);
int32_t M_row[WIENER_WIN2] = { 0 };
  int32_t H_row[WIENER_WIN2 * WIENER_WIN2] = { 0 };
  int downsample_factor =
use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
  for (i = v_start; i < v_end; i = i + downsample_factor) {
    if (use_downsampled_wiener_stats &&
(v_end - i < WIENER_STATS_DOWNSAMPLE_FACTOR)) {
downsample_factor = v_end - i;
}
    for (k = 0; k < wiener_win2; ++k) {
      // Scale M matrix based on the downsampling factor
      M[k] += ((int64_t)M_row[k] * downsample_factor);
      for (l = k; l < wiener_win2; ++l) {
        // H is a symmetric matrix, so we only need to fill out the upper
        // triangle here. We can copy it down to the lower triangle outside
        // the (i, j) loops.
        // Scale H Matrix based on the downsampling factor
H[k * wiener_win2 + l] +=
((int64_t)H_row[k * wiener_win2 + l] * downsample_factor);
}
}
}
  for (k = 0; k < wiener_win2; ++k) {
    for (l = k + 1; l < wiener_win2; ++l) {
H[l * wiener_win2 + k] = H[k * wiener_win2 + l];
}
}
}
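// Illustrative note (not from the source): with downsampling enabled, only
// every WIENER_STATS_DOWNSAMPLE_FACTOR-th row contributes statistics, and
// each sampled row's M/H contribution is multiplied by the factor to
// approximate the full-resolution sums. E.g. with a factor of 4 over 64 rows,
// 16 rows are visited and each is weighted x4; the tail rows use the smaller
// residual factor computed above, so the total weight still matches the
// number of rows, v_end - v_start.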
#if CONFIG_AV1_HIGHBITDEPTH
void av1_compute_stats_highbd_c(int wiener_win, const uint8_t *dgd8,
                                const uint8_t *src8, int16_t *dgd_avg,
                                int16_t *src_avg, int h_start, int h_end,
                                int v_start, int v_end, int dgd_stride,
                                int src_stride, int64_t *M, int64_t *H,
aom_bit_depth_t bit_depth) {
(void)dgd_avg;
  (void)src_avg;
  int i, j, k, l;
  int32_t Y[WIENER_WIN2];
  const int wiener_win2 = wiener_win * wiener_win;
  const int wiener_halfwin = (wiener_win >> 1);
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
uint16_t avg =
find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);
  // Scale the statistics down at 10- and 12-bit depths so that they occupy
  // the same dynamic range as at 8 bits.
  uint8_t bit_depth_divider = 1;
  if (bit_depth == AOM_BITS_12)
    bit_depth_divider = 16;
  else if (bit_depth == AOM_BITS_10)
    bit_depth_divider = 4;

  memset(M, 0, sizeof(*M) * wiener_win2);
  memset(H, 0, sizeof(*H) * wiener_win2 * wiener_win2);
  for (i = v_start; i < v_end; i++) {
    for (j = h_start; j < h_end; j++) {
      const int32_t X = (int32_t)src[i * src_stride + j] - (int32_t)avg;
      int idx = 0;
      for (k = -wiener_halfwin; k <= wiener_halfwin; k++) {
        for (l = -wiener_halfwin; l <= wiener_halfwin; l++) {
Y[idx] = (int32_t)dgd[(i + l) * dgd_stride + (j + k)] - (int32_t)avg;
idx++;
}
}
      assert(idx == wiener_win2);
      for (k = 0; k < wiener_win2; ++k) {
        M[k] += (int64_t)Y[k] * X;
        for (l = k; l < wiener_win2; ++l) {
          // H is a symmetric matrix, so we only need to fill out the upper
          // triangle here. We can copy it down to the lower triangle outside
          // the (i, j) loops.
H[k * wiener_win2 + l] += (int64_t)Y[k] * Y[l];
}
}
}
  }
  for (k = 0; k < wiener_win2; ++k) {
M[k] /= bit_depth_divider;
    H[k * wiener_win2 + k] /= bit_depth_divider;
    for (l = k + 1; l < wiener_win2; ++l) {
H[k * wiener_win2 + l] /= bit_depth_divider;
H[l * wiener_win2 + k] = H[k * wiener_win2 + l];
}
}
}
#endif  // CONFIG_AV1_HIGHBITDEPTH
static inline int wrap_index(int i, int wiener_win) {
  const int wiener_halfwin1 = (wiener_win >> 1) + 1;
  return (i >= wiener_halfwin1 ? wiener_win - 1 - i : i);
}
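// Illustrative note (not from the source): the Wiener filter is symmetric, so
// only the first wiener_halfwin1 taps are unique. For wiener_win = 7,
// wiener_halfwin1 = 4 and wrap_index maps i = 0..6 onto 0, 1, 2, 3, 2, 1, 0,
// folding the mirrored second half of the kernel back onto its first half.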
// Splits each w[i] into smaller components w1[i] and w2[i] such that
// w[i] = w1[i] * WIENER_TAP_SCALE_FACTOR + w2[i].
static inline void split_wiener_filter_coefficients(int wiener_win,
                                                    const int32_t *w,
                                                    int32_t *w1, int32_t *w2) {
  for (int i = 0; i < wiener_win; i++) {
w1[i] = w[i] / WIENER_TAP_SCALE_FACTOR;
w2[i] = w[i] - w1[i] * WIENER_TAP_SCALE_FACTOR;
assert(w[i] == w1[i] * WIENER_TAP_SCALE_FACTOR + w2[i]);
}
}
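// Worked example (illustrative numbers, assuming the usual
// WIENER_TAP_SCALE_FACTOR of 1 << 16): w[i] = 200000 splits into
// w1[i] = 200000 / 65536 = 3 and w2[i] = 200000 - 3 * 65536 = 3392, and
// 3 * 65536 + 3392 == 200000 as the assert above requires. C division
// truncates toward zero, so for w[i] = -5 the split is w1[i] = 0, w2[i] = -5.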
// Calculates x * w / WIENER_TAP_SCALE_FACTOR, where
// w = w1 * WIENER_TAP_SCALE_FACTOR + w2.
//
// The multiplication x * w may overflow, so we multiply x by the components of
// w (w1 and w2) and combine the multiplication with the division.
static inline int64_t multiply_and_scale(int64_t x, int32_t w1, int32_t w2) {
  // Let y = x * w / WIENER_TAP_SCALE_FACTOR
  //       = x * (w1 * WIENER_TAP_SCALE_FACTOR + w2) / WIENER_TAP_SCALE_FACTOR
  const int64_t y = x * w1 + x * w2 / WIENER_TAP_SCALE_FACTOR;
  return y;
}
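// Illustrative note (not from the source): with WIENER_TAP_SCALE_FACTOR of
// 1 << 16, take x ~ 2^40 and w ~ 2^25 (so w1 ~ 2^9, w2 < 2^16). The naive
// x * w needs ~65 bits and overflows int64_t, while the split form computes
// x * w1 (~2^49) and x * w2 (at most ~2^56 before the division), both safely
// in range. The only cost is dropping the sub-unit remainder of the
// x * w2 / WIENER_TAP_SCALE_FACTOR term.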
// Solve linear equations to find Wiener filter tap values
// Taps are output scaled by WIENER_FILT_STEP
static int linsolve_wiener(int n, int64_t *A, int stride, int64_t *b,
                           int64_t *x) {
  for (int k = 0; k < n - 1; k++) {
    // Partial pivoting: bring the row with the largest pivot to the top
    for (int i = n - 1; i > k; i--) {
      // If row i has a better (bigger) pivot than row (i-1), swap them
      if (llabs(A[(i - 1) * stride + k]) < llabs(A[i * stride + k])) {
        for (int j = 0; j < n; j++) {
          const int64_t c = A[i * stride + j];
A[i * stride + j] = A[(i - 1) * stride + j];
A[(i - 1) * stride + j] = c;
        }
        const int64_t c = b[i];
b[i] = b[i - 1];
b[i - 1] = c;
}
}
    // b/278065963: The multiplies
    //   c / 256 * A[k * stride + j] / cd * 256
    // and
    //   c / 256 * b[k] / cd * 256
    // within Gaussian elimination can cause a signed integer overflow. Rework
    // the multiplies so that larger scaling is used without significantly
    // impacting the overall precision.
    //
    // Precision guidance:
    //   scale_threshold: Pick as high as possible.
    // For max_abs_akj >= scale_threshold scenario:
    //   scaler_A: Pick as low as possible. Needed for A[(i + 1) * stride + j].
    //   scaler_c: Pick as low as possible while maintaining scaler_c >=
    //   (1 << 7). Needed for A[(i + 1) * stride + j] and b[i + 1].
    int64_t max_abs_akj = 0;
    for (int j = 0; j < n; j++) {
      const int64_t abs_akj = llabs(A[k * stride + j]);
      if (abs_akj > max_abs_akj) max_abs_akj = abs_akj;
    }
    const int scale_threshold = 1 << 22;
    const int scaler_A = max_abs_akj < scale_threshold ? 1 : (1 << 6);
    const int scaler_c = max_abs_akj < scale_threshold ? 1 : (1 << 7);
    const int scaler = scaler_c * scaler_A;
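    // Illustrative note (not from the source): when the pivot row is large
    // (max_abs_akj >= 2^22), the update below evaluates
    //   A[k][j] / 2^6 * (A[i+1][k] / 2^7) / A[k][k] * 2^13,
    // which is algebraically the plain A[k][j] * A[i+1][k] / A[k][k] but
    // keeps every intermediate product roughly 2^13 times smaller, trading
    // ~13 bits of precision for freedom from int64_t overflow.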
    // Forward elimination (convert A to row-echelon form)
    for (int i = k; i < n - 1; i++) {
      if (A[k * stride + k] == 0) return 0;
      const int64_t c = A[(i + 1) * stride + k] / scaler_c;
      const int64_t cd = A[k * stride + k];
      for (int j = 0; j < n; j++) {
A[(i + 1) * stride + j] -=
A[k * stride + j] / scaler_A * c / cd * scaler;
}
b[i + 1] -= c * b[k] / cd * scaler_c;
}
  }
  // Back-substitution
  for (int i = n - 1; i >= 0; i--) {
    if (A[i * stride + i] == 0) return 0;
    int64_t c = 0;
    for (int j = i + 1; j <= n - 1; j++) {
c += A[i * stride + j] * x[j] / WIENER_TAP_SCALE_FACTOR;
    }
    // Store filter taps x in scaled form.
x[i] = WIENER_TAP_SCALE_FACTOR * (b[i] - c) / A[i * stride + i];
}
  for (i = 0; i < wiener_win; i++) {
    for (j = 0; j < wiener_win; j++) {
      int k, l;
      for (k = 0; k < wiener_win; ++k) {
        const int kk = wrap_index(k, wiener_win);
        for (l = 0; l < wiener_win; ++l) {
          const int ll = wrap_index(l, wiener_win);
          // Calculate
          // B[ll * wiener_halfwin1 + kk] +=
          //     Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] /
          //     WIENER_TAP_SCALE_FACTOR * b[j] / WIENER_TAP_SCALE_FACTOR;
          //
          // The last multiplication may overflow, so we combine the last
          // multiplication with the last division.
          const int64_t x = Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] /
                            WIENER_TAP_SCALE_FACTOR;
          // b[j] = b1[j] * WIENER_TAP_SCALE_FACTOR + b2[j]
B[ll * wiener_halfwin1 + kk] += multiply_and_scale(x, b1[j], b2[j]);
}
}
}
  }
  // Normalization enforcement in the system of equations itself
  for (i = 0; i < wiener_halfwin1 - 1; ++i) {
A[i] -=
A[wiener_halfwin1 - 1] * 2 +
B[i * wiener_halfwin1 + wiener_halfwin1 - 1] -
2 * B[(wiener_halfwin1 - 1) * wiener_halfwin1 + (wiener_halfwin1 - 1)];
  }
  for (i = 0; i < wiener_halfwin1 - 1; ++i) {
    for (j = 0; j < wiener_halfwin1 - 1; ++j) {
B[i * wiener_halfwin1 + j] -=
2 * (B[i * wiener_halfwin1 + (wiener_halfwin1 - 1)] +
B[(wiener_halfwin1 - 1) * wiener_halfwin1 + j] -
2 * B[(wiener_halfwin1 - 1) * wiener_halfwin1 +
(wiener_halfwin1 - 1)]);
}
  }
  if (linsolve_wiener(wiener_halfwin1 - 1, B, wiener_halfwin1, A, S)) {
    S[wiener_halfwin1 - 1] = WIENER_TAP_SCALE_FACTOR;
    for (i = wiener_halfwin1; i < wiener_win; ++i) {
S[i] = S[wiener_win - 1 - i];
S[wiener_halfwin1 - 1] -= 2 * S[i];
    }
    for (i = 0; i < wiener_win; ++i) {
a[i] = (int32_t)CLIP(S[i], -(1 << (WIENER_FILT_BITS - 1)),
(1 << (WIENER_FILT_BITS - 1)) - 1);
}
}
}
// Fix vector a, update vector b
static inline void update_b_sep_sym(int wiener_win, int64_t **Mc, int64_t **Hc,
                                    const int32_t *a, int32_t *b) {
  int i, j;
int64_t S[WIENER_WIN];
int64_t A[WIENER_HALFWIN1], B[WIENER_HALFWIN1 * WIENER_HALFWIN1];
  int32_t a1[WIENER_WIN], a2[WIENER_WIN];
  const int wiener_win2 = wiener_win * wiener_win;
  const int wiener_halfwin1 = (wiener_win >> 1) + 1;
memset(A, 0, sizeof(A));
  memset(B, 0, sizeof(B));
  for (i = 0; i < wiener_win; i++) {
    const int ii = wrap_index(i, wiener_win);
    for (j = 0; j < wiener_win; j++) {
A[ii] += Mc[i][j] * a[j] / WIENER_TAP_SCALE_FACTOR;
}
}
split_wiener_filter_coefficients(wiener_win, a, a1, a2);
  for (i = 0; i < wiener_win; i++) {
    const int ii = wrap_index(i, wiener_win);
    for (j = 0; j < wiener_win; j++) {
      const int jj = wrap_index(j, wiener_win);
      int k, l;
      for (k = 0; k < wiener_win; ++k) {
        for (l = 0; l < wiener_win; ++l) {
          // Calculate
          // B[jj * wiener_halfwin1 + ii] +=
          //     Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] /
          //     WIENER_TAP_SCALE_FACTOR * a[l] / WIENER_TAP_SCALE_FACTOR;
          //
          // The last multiplication may overflow, so we combine the last
          // multiplication with the last division.
          const int64_t x = Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] /
                            WIENER_TAP_SCALE_FACTOR;
          // a[l] = a1[l] * WIENER_TAP_SCALE_FACTOR + a2[l]
B[jj * wiener_halfwin1 + ii] += multiply_and_scale(x, a1[l], a2[l]);
}
}
}
  }
  // Normalization enforcement in the system of equations itself
  for (i = 0; i < wiener_halfwin1 - 1; ++i) {
A[i] -=
A[wiener_halfwin1 - 1] * 2 +
B[i * wiener_halfwin1 + wiener_halfwin1 - 1] -
2 * B[(wiener_halfwin1 - 1) * wiener_halfwin1 + (wiener_halfwin1 - 1)];
  }
  for (i = 0; i < wiener_halfwin1 - 1; ++i) {
    for (j = 0; j < wiener_halfwin1 - 1; ++j) {
B[i * wiener_halfwin1 + j] -=
2 * (B[i * wiener_halfwin1 + (wiener_halfwin1 - 1)] +
B[(wiener_halfwin1 - 1) * wiener_halfwin1 + j] -
2 * B[(wiener_halfwin1 - 1) * wiener_halfwin1 +
(wiener_halfwin1 - 1)]);
}
  }
  if (linsolve_wiener(wiener_halfwin1 - 1, B, wiener_halfwin1, A, S)) {
    S[wiener_halfwin1 - 1] = WIENER_TAP_SCALE_FACTOR;
    for (i = wiener_halfwin1; i < wiener_win; ++i) {
S[i] = S[wiener_win - 1 - i];
S[wiener_halfwin1 - 1] -= 2 * S[i];
    }
    for (i = 0; i < wiener_win; ++i) {
b[i] = (int32_t)CLIP(S[i], -(1 << (WIENER_FILT_BITS - 1)),
(1 << (WIENER_FILT_BITS - 1)) - 1);
}
}
}
static void wiener_decompose_sep_sym(int wiener_win, int64_t *M, int64_t *H,
                                     int32_t *a, int32_t *b) {
  static const int32_t init_filt[WIENER_WIN] = {
WIENER_FILT_TAP0_MIDV, WIENER_FILT_TAP1_MIDV, WIENER_FILT_TAP2_MIDV,
WIENER_FILT_TAP3_MIDV, WIENER_FILT_TAP2_MIDV, WIENER_FILT_TAP1_MIDV,
WIENER_FILT_TAP0_MIDV,
};
int64_t *Hc[WIENER_WIN2];
  int64_t *Mc[WIENER_WIN];
  int i, j, iter;
  const int plane_off = (WIENER_WIN - wiener_win) >> 1;
  const int wiener_win2 = wiener_win * wiener_win;
  for (i = 0; i < wiener_win; i++) {
a[i] = b[i] =
WIENER_TAP_SCALE_FACTOR / WIENER_FILT_STEP * init_filt[i + plane_off];
  }
  for (i = 0; i < wiener_win; i++) {
    Mc[i] = M + i * wiener_win;
    for (j = 0; j < wiener_win; j++) {
Hc[i * wiener_win + j] =
H + i * wiener_win * wiener_win2 + j * wiener_win;
}
}
  iter = 1;
  while (iter < NUM_WIENER_ITERS) {
update_a_sep_sym(wiener_win, Mc, Hc, a, b);
update_b_sep_sym(wiener_win, Mc, Hc, a, b);
iter++;
}
}
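// Illustrative note (not from the source): this is alternating minimization
// on the separable filter model. Starting from the default mid taps, each
// iteration fixes b and re-solves the linear system for a, then fixes a and
// re-solves for b, so every pass can only reduce (or keep) the quadratic
// error, and the taps settle toward a locally optimal separable approximation
// of the full 2D Wiener solution.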
// Computes the function x'*H*x - x'*M for the learned 2D filter x, and
// compares against identity filters; Final score is defined as the difference
// between the function values
static int64_t compute_score(int wiener_win, int64_t *M, int64_t *H,
                             InterpKernel vfilt, InterpKernel hfilt) {
int32_t ab[WIENER_WIN * WIENER_WIN];
int16_t a[WIENER_WIN], b[WIENER_WIN];
int64_t P = 0, Q = 0;
int64_t iP = 0, iQ = 0;
  int64_t Score, iScore;
  int i, k, l;
  const int plane_off = (WIENER_WIN - wiener_win) >> 1;
  const int wiener_win2 = wiener_win * wiener_win;
  a[WIENER_HALFWIN] = b[WIENER_HALFWIN] = WIENER_FILT_STEP;
  for (i = 0; i < WIENER_HALFWIN; ++i) {
a[i] = a[WIENER_WIN - i - 1] = vfilt[i];
b[i] = b[WIENER_WIN - i - 1] = hfilt[i];
a[WIENER_HALFWIN] -= 2 * a[i];
b[WIENER_HALFWIN] -= 2 * b[i];
}
  memset(ab, 0, sizeof(ab));
  for (k = 0; k < wiener_win; ++k) {
    for (l = 0; l < wiener_win; ++l)
ab[k * wiener_win + l] = a[l + plane_off] * b[k + plane_off];
  }
  for (k = 0; k < wiener_win2; ++k) {
    P += ab[k] * M[k] / WIENER_FILT_STEP / WIENER_FILT_STEP;
    for (l = 0; l < wiener_win2; ++l) {
Q += ab[k] * H[k * wiener_win2 + l] * ab[l] / WIENER_FILT_STEP /
WIENER_FILT_STEP / WIENER_FILT_STEP / WIENER_FILT_STEP;
}
}
Score = Q - 2 * P;
iP = M[wiener_win2 >> 1];
iQ = H[(wiener_win2 >> 1) * wiener_win2 + (wiener_win2 >> 1)];
iScore = iQ - 2 * iP;
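  // Illustrative note (not from the source): minimizing the restoration SSE
  // is equivalent, up to a constant, to minimizing x'*H*x - 2*x'*M, so Score
  // measures the learned filter and iScore the identity (pass-through) filter
  // on the same scale; the caller keeps the learned taps only when Score
  // compares favourably against iScore.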
#if CONFIG_AV1_HIGHBITDEPTH
  const AV1_COMMON *const cm = rsc->cm;
  if (cm->seq_params->use_highbitdepth) {
    // TODO(any) : Add support for use_downsampled_wiener_stats SF in HBD
    // functions. Optimize intrinsics of HBD design similar to LBD (i.e.,
    // pre-calculate d and s buffers and avoid most of the C operations).
av1_compute_stats_highbd(reduced_wiener_win, rsc->dgd_buffer,
rsc->src_buffer, rsc->dgd_avg, rsc->src_avg,
limits->h_start, limits->h_end, limits->v_start,
limits->v_end, rsc->dgd_stride, rsc->src_stride, M,
H, cm->seq_params->bit_depth);