/* * Copyright (c) 2017, Alliance for Open Media. All rights reserved. * * This source code is subject to the terms of the BSD 2 Clause License and * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License * was not distributed with this source code in the LICENSE file, you can * obtain it at www.aomedia.org/license/software. If the Alliance for Open * Media Patent License 1.0 was not distributed with this source code in the * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
// Defines a function that can be used to obtain the mean of a block for the // provided data type (uint8_t, or uint16_t) #define GET_BLOCK_MEAN(INT_TYPE, suffix) \ staticdouble get_block_mean_##suffix(const INT_TYPE *data, int w, int h, \ int stride, int x_o, int y_o, \ int block_size) { \ constint max_h = AOMMIN(h - y_o, block_size); \ constint max_w = AOMMIN(w - x_o, block_size); \ double block_mean = 0; \ for (int y = 0; y < max_h; ++y) { \ for (int x = 0; x < max_w; ++x) { \
block_mean += data[(y_o + y) * stride + x_o + x]; \
} \
} \ return block_mean / (max_w * max_h); \
}
// Bit-depth dispatcher for the block-mean helpers generated by
// GET_BLOCK_MEAN. When use_highbd is non-zero, `data` actually points at
// 16-bit samples and is reinterpreted accordingly.
static inline double get_block_mean(const uint8_t *data, int w, int h,
                                    int stride, int x_o, int y_o,
                                    int block_size, int use_highbd) {
  if (use_highbd) {
    const uint16_t *data16 = (const uint16_t *)data;
    return get_block_mean_highbd(data16, w, h, stride, x_o, y_o, block_size);
  }
  return get_block_mean_lowbd(data, w, h, stride, x_o, y_o, block_size);
}
// Defines a function that can be used to obtain the variance of a block // for the provided data type (uint8_t, or uint16_t) #define GET_NOISE_VAR(INT_TYPE, suffix) \ staticdouble get_noise_var_##suffix( \ const INT_TYPE *data, const INT_TYPE *denoised, int stride, int w, \ int h, int x_o, int y_o, int block_size_x, int block_size_y) { \ constint max_h = AOMMIN(h - y_o, block_size_y); \ constint max_w = AOMMIN(w - x_o, block_size_x); \ double noise_var = 0; \ double noise_mean = 0; \ for (int y = 0; y < max_h; ++y) { \ for (int x = 0; x < max_w; ++x) { \ double noise = (double)data[(y_o + y) * stride + x_o + x] - \
denoised[(y_o + y) * stride + x_o + x]; \
noise_mean += noise; \
noise_var += noise * noise; \
} \
} \
noise_mean /= (max_w * max_h); \ return noise_var / (max_w * max_h) - noise_mean * noise_mean; \
}
// Return the number of coefficients required for the given parameters staticint num_coeffs(const aom_noise_model_params_t params) { constint n = 2 * params.lag + 1; switch (params.shape) { case AOM_NOISE_SHAPE_DIAMOND: return params.lag * (params.lag + 1); case AOM_NOISE_SHAPE_SQUARE: return (n * n) / 2;
} return 0;
}
// Initialize one per-channel noise state: the AR equation system of size n
// and the strength solver. Returns 1 on success, 0 on failure (in which case
// an error is printed to stderr for the equation-system path).
static int noise_state_init(aom_noise_state_t *state, int n, int bit_depth) {
  const int kStrengthSolverBins = 20;  // Number of intensity bins to fit.
  if (!equation_system_init(&state->eqns, n)) {
    fprintf(stderr, "Failed initialization noise state with size %d\n", n);
    return 0;
  }
  state->num_observations = 0;
  state->ar_gain = 1.0;  // Neutral gain until the AR system is solved.
  return aom_noise_strength_solver_init(&state->strength_solver,
                                        kStrengthSolverBins, bit_depth);
}
// Fallback solution for a chroma channel whose full AR system could not be
// solved: zero every AR coefficient, then (if the corresponding diagonal
// entry is not numerically singular) solve only the last unknown, which is
// the correlation with the luma channel.
static void set_chroma_coefficient_fallback_soln(aom_equation_system_t *eqns) {
  const double kTolerance = 1e-6;
  const int n = eqns->n;
  const int last = n - 1;
  memset(eqns->x, 0, sizeof(*eqns->x) * n);
  const double diag = eqns->A[last * n + last];
  if (fabs(diag) > kTolerance) {
    eqns->x[last] = eqns->b[last] / diag;
  }
}
// Allocate and zero a piecewise-linear lookup table with num_points
// (x, y) entries. Returns 1 on success; 0 if lut is NULL, num_points is
// non-positive, or allocation fails (lut->num_points stays 0 in that case).
int aom_noise_strength_lut_init(aom_noise_strength_lut_t *lut,
                                int num_points) {
  if (!lut || num_points <= 0) return 0;
  lut->num_points = 0;
  lut->points = (double(*)[2])aom_malloc(num_points * sizeof(*lut->points));
  if (!lut->points) return 0;
  lut->num_points = num_points;
  memset(lut->points, 0, sizeof(*lut->points) * num_points);
  return 1;
}
// Evaluate the fitted noise-strength function at intensity x by linearly
// interpolating between the two bins that bracket x.
static double noise_strength_solver_get_value(
    const aom_noise_strength_solver_t *solver, double x) {
  const double bin = noise_strength_solver_get_bin_index(solver, x);
  const int lo = (int)floor(bin);
  const int hi = AOMMIN(solver->num_bins - 1, lo + 1);
  const double w = bin - lo;  // Fractional position between lo and hi.
  return (1.0 - w) * solver->eqns.x[lo] + w * solver->eqns.x[hi];
}
// Add one (block_mean, noise_std) observation to the strength solver. The
// measurement is distributed between the two bins bracketing block_mean with
// linear-interpolation weights, accumulating into the normal equations
// (A = M^T M, b = M^T y) that aom_noise_strength_solver_solve later solves.
void aom_noise_strength_solver_add_measurement(
    aom_noise_strength_solver_t *solver, double block_mean, double noise_std) {
  const double bin = noise_strength_solver_get_bin_index(solver, block_mean);
  const int i0 = (int)floor(bin);
  const int i1 = AOMMIN(solver->num_bins - 1, i0 + 1);
  const double w1 = bin - i0;  // Weight on the upper bin.
  const double w0 = 1.0 - w1;  // Weight on the lower bin.
  const int n = solver->num_bins;
  solver->eqns.A[i0 * n + i0] += w0 * w0;
  solver->eqns.A[i1 * n + i0] += w1 * w0;
  solver->eqns.A[i1 * n + i1] += w1 * w1;
  solver->eqns.A[i0 * n + i1] += w1 * w0;
  solver->eqns.b[i0] += w0 * noise_std;
  solver->eqns.b[i1] += w1 * noise_std;
  // Running totals used for the mean-strength regularizer in solve().
  solver->total += noise_std;
  solver->num_equations++;
}
// Solve the accumulated strength normal equations with two regularizers:
// a second-difference smoothness term on a copy of A, and a tiny pull of
// every bin toward the mean observed strength. Returns the result of
// equation_system_solve (0 on failure).
//
// NOTE(review): only A is copied and restored below; the mean/8192 term is
// added to solver->eqns.b in place and is NOT undone, so repeated calls
// accumulate it — appears intentional upstream, but confirm before relying
// on solve() being idempotent.
int aom_noise_strength_solver_solve(aom_noise_strength_solver_t *solver) {
  // Add regularization proportional to the number of constraints
  const int n = solver->num_bins;
  const double kAlpha = 2.0 * (double)(solver->num_equations) / n;
  int result = 0;
  double mean = 0;

  // Do this in a non-destructive manner so it is not confusing to the caller
  double *old_A = solver->eqns.A;
  double *A = (double *)aom_malloc(sizeof(*A) * n * n);
  if (!A) {
    fprintf(stderr, "Unable to allocate copy of A\n");
    return 0;
  }
  memcpy(A, old_A, sizeof(*A) * n * n);

  // Smoothness: penalize the discrete second difference of neighboring bins
  // (edge bins reuse their single neighbor via i_lo/i_hi clamping).
  for (int i = 0; i < n; ++i) {
    const int i_lo = AOMMAX(0, i - 1);
    const int i_hi = AOMMIN(n - 1, i + 1);
    A[i * n + i_lo] -= kAlpha;
    A[i * n + i] += 2 * kAlpha;
    A[i * n + i_hi] -= kAlpha;
  }

  // Small regularization to give average noise strength
  mean = solver->total / solver->num_equations;
  for (int i = 0; i < n; ++i) {
    A[i * n + i] += 1.0 / 8192.;
    solver->eqns.b[i] += mean / 8192.;
  }
  // Temporarily swap in the regularized matrix, solve, then restore.
  solver->eqns.A = A;
  result = equation_system_solve(&solver->eqns);
  solver->eqns.A = old_A;

  aom_free(A);
  return result;
}
// Initialize a strength solver with num_bins intensity bins spanning the
// full intensity range of the given bit depth ([0, 2^bit_depth - 1]).
// Returns 1 on success, 0 if solver is NULL or the equation system cannot
// be allocated.
int aom_noise_strength_solver_init(aom_noise_strength_solver_t *solver,
                                   int num_bins, int bit_depth) {
  if (!solver) return 0;
  memset(solver, 0, sizeof(*solver));
  solver->num_bins = num_bins;
  solver->min_intensity = 0;
  solver->max_intensity = (1 << bit_depth) - 1;
  solver->total = 0;
  solver->num_equations = 0;
  return equation_system_init(&solver->eqns, num_bins);
}
// Release the equation system owned by the solver. Safe to call with NULL.
void aom_noise_strength_solver_free(aom_noise_strength_solver_t *solver) {
  if (!solver) return;
  equation_system_free(&solver->eqns);
}
// Return the intensity value at the center of bin i, mapping the index range
// [0, num_bins - 1] linearly onto [min_intensity, max_intensity].
double aom_noise_strength_solver_get_center(
    const aom_noise_strength_solver_t *solver, int i) {
  const int n = solver->num_bins;
  const double range = solver->max_intensity - solver->min_intensity;
  return solver->min_intensity + ((double)i) / (n - 1) * range;
}
// Computes the residual if a point were to be removed from the lut. This is
// calculated as the area between the output of the solver and the line segment
// that would be formed between [x_{i - 1}, x_{i + 1}).
//
// Only residual[i] for i in [max(start,1), min(end, num_points-1)) is
// updated; endpoints are never candidates for removal. The area is
// approximated by summing |solver output - interpolated estimate| at each
// bin center inside the interval and scaling by the bin width dx.
static void update_piecewise_linear_residual(
    const aom_noise_strength_solver_t *solver,
    const aom_noise_strength_lut_t *lut, double *residual, int start,
    int end) {
  // NOTE(review): dx uses the fixed 255 range rather than
  // solver->max_intensity; presumably residuals are normalized to 8-bit
  // units regardless of bit depth — confirm against the fit tolerance.
  const double dx = 255. / solver->num_bins;
  for (int i = AOMMAX(start, 1); i < AOMMIN(end, lut->num_points - 1); ++i) {
    // Bin range covering [x_{i-1}, x_{i+1}], clamped to valid bins.
    const int lower = AOMMAX(0, (int)floor(noise_strength_solver_get_bin_index(
                                    solver, lut->points[i - 1][0])));
    const int upper = AOMMIN(solver->num_bins - 1,
                             (int)ceil(noise_strength_solver_get_bin_index(
                                 solver, lut->points[i + 1][0])));
    double r = 0;
    for (int j = lower; j <= upper; ++j) {
      const double x = aom_noise_strength_solver_get_center(solver, j);
      // Skip bin centers outside the half-open interval [x_{i-1}, x_{i+1}).
      if (x < lut->points[i - 1][0]) continue;
      if (x >= lut->points[i + 1][0]) continue;
      const double y = solver->eqns.x[j];
      // Linear interpolation between the two neighboring lut points.
      const double a = (x - lut->points[i - 1][0]) /
                       (lut->points[i + 1][0] - lut->points[i - 1][0]);
      const double estimate_y =
          lut->points[i - 1][1] * (1.0 - a) + lut->points[i + 1][1] * a;
      r += fabs(y - estimate_y);
    }
    residual[i] = r * dx;
  }
}
int aom_noise_strength_solver_fit_piecewise( const aom_noise_strength_solver_t *solver, int max_output_points,
aom_noise_strength_lut_t *lut) { // The tolerance is normalized to be give consistent results between // different bit-depths. constdouble kTolerance = solver->max_intensity * 0.00625 / 255.0; if (!aom_noise_strength_lut_init(lut, solver->num_bins)) {
fprintf(stderr, "Failed to init lut\n"); return 0;
} for (int i = 0; i < solver->num_bins; ++i) {
lut->points[i][0] = aom_noise_strength_solver_get_center(solver, i);
lut->points[i][1] = solver->eqns.x[i];
} if (max_output_points < 0) {
max_output_points = solver->num_bins;
}
// Greedily remove points if there are too many or if it doesn't hurt local // approximation (never remove the end points) while (lut->num_points > 2) { int min_index = 1; for (int j = 1; j < lut->num_points - 1; ++j) { if (residual[j] < residual[min_index]) {
min_index = j;
}
} constdouble dx =
lut->points[min_index + 1][0] - lut->points[min_index - 1][0]; constdouble avg_residual = residual[min_index] / dx; if (lut->num_points <= max_output_points && avg_residual > kTolerance) { break;
}
int aom_flat_block_finder_init(aom_flat_block_finder_t *block_finder, int block_size, int bit_depth, int use_highbd) { constint n = block_size * block_size;
aom_equation_system_t eqns; double *AtA_inv = 0; double *A = 0; int x = 0, y = 0, i = 0, j = 0;
block_finder->A = NULL;
block_finder->AtA_inv = NULL;
if (!equation_system_init(&eqns, kLowPolyNumParams)) {
fprintf(stderr, "Failed to init equation system for block_size=%d\n",
block_size); return 0;
}
AtA_inv = (double *)aom_malloc(kLowPolyNumParams * kLowPolyNumParams * sizeof(*AtA_inv));
A = (double *)aom_malloc(kLowPolyNumParams * n * sizeof(*A)); if (AtA_inv == NULL || A == NULL) {
fprintf(stderr, "Failed to alloc A or AtA_inv for block_size=%d\n",
block_size);
aom_free(AtA_inv);
aom_free(A);
equation_system_free(&eqns); return 0;
}
{ constdouble trace = Gxx + Gyy; constdouble det = Gxx * Gyy - Gxy * Gxy; constdouble e1 = (trace + sqrt(trace * trace - 4 * det)) / 2.; constdouble e2 = (trace - sqrt(trace * trace - 4 * det)) / 2.; constdouble norm = e1; // Spectral norm constdouble ratio = (e1 / AOMMAX(e2, 1e-6)); constint is_flat = (trace < kTraceThreshold) &&
(ratio < kRatioThreshold) &&
(norm < kNormThreshold) && (var > kVarThreshold); // The following weights are used to combine the above features to give // a sigmoid score for flatness. If the input was normalized to [0,100] // the magnitude of these values would be close to 1 (e.g., weights // corresponding to variance would be a factor of 10000x smaller). // The weights are given in the following order: // [{var}, {ratio}, {trace}, {norm}, offset] // with one of the most discriminative being simply the variance. constdouble weights[5] = { -6682, -0.2056, 13087, -12434, 2.5694 }; double sum_weights = weights[0] * var + weights[1] * ratio +
weights[2] * trace + weights[3] * norm +
weights[4]; // clamp the value to [-25.0, 100.0] to prevent overflow
sum_weights = fclamp(sum_weights, -25.0, 100.0); constfloat score = (float)(1.0 / (1 + exp(-sum_weights)));
flat_blocks[by * num_blocks_w + bx] = is_flat ? 255 : 0;
scores[by * num_blocks_w + bx].score = var > kVarThreshold ? score : 0;
scores[by * num_blocks_w + bx].index = by * num_blocks_w + bx; #ifdef NOISE_MODEL_LOG_SCORE
fprintf(stderr, "%g %g %g %g %g %d ", score, var, ratio, trace, norm,
is_flat); #endif
num_flat += is_flat;
}
} #ifdef NOISE_MODEL_LOG_SCORE
fprintf(stderr, "\n"); #endif
} #ifdef NOISE_MODEL_LOG_SCORE
fprintf(stderr, "];\n"); #endif // Find the top-scored blocks (most likely to be flat) and set the flat blocks // be the union of the thresholded results and the top 10th percentile of the // scored results.
qsort(scores, num_blocks_w * num_blocks_h, sizeof(*scores), &compare_scores); constint top_nth_percentile = num_blocks_w * num_blocks_h * 90 / 100; constfloat score_threshold = scores[top_nth_percentile].score; for (int i = 0; i < num_blocks_w * num_blocks_h; ++i) { if (scores[i].score >= score_threshold) {
num_flat += flat_blocks[scores[i].index] == 0;
flat_blocks[scores[i].index] |= 1;
}
}
aom_free(block);
aom_free(plane);
aom_free(scores); return num_flat;
}
// Initialize a noise model for the given parameters: validates lag and bit
// depth, allocates per-channel latest/combined noise states, and builds the
// causal AR neighborhood coordinate list (model->coords) for the requested
// shape. Returns 1 on success; 0 on invalid parameters or allocation failure
// (partially-initialized state is released via aom_noise_model_free).
int aom_noise_model_init(aom_noise_model_t *model,
                         const aom_noise_model_params_t params) {
  const int n = num_coeffs(params);
  const int lag = params.lag;
  const int bit_depth = params.bit_depth;
  int x = 0, y = 0, i = 0, c = 0;

  memset(model, 0, sizeof(*model));
  if (params.lag < 1) {
    fprintf(stderr, "Invalid noise param: lag = %d must be >= 1\n", params.lag);
    return 0;
  }
  if (params.lag > kMaxLag) {
    fprintf(stderr, "Invalid noise param: lag = %d must be <= %d\n", params.lag,
            kMaxLag);
    return 0;
  }
  if (!(params.bit_depth == 8 || params.bit_depth == 10 ||
        params.bit_depth == 12)) {
    return 0;
  }

  memcpy(&model->params, &params, sizeof(params));
  // Chroma channels (c > 0) carry one extra coefficient: the correlation
  // with luma.
  for (c = 0; c < 3; ++c) {
    if (!noise_state_init(&model->combined_state[c], n + (c > 0), bit_depth)) {
      fprintf(stderr, "Failed to allocate noise state for channel %d\n", c);
      aom_noise_model_free(model);
      return 0;
    }
    if (!noise_state_init(&model->latest_state[c], n + (c > 0), bit_depth)) {
      fprintf(stderr, "Failed to allocate noise state for channel %d\n", c);
      aom_noise_model_free(model);
      return 0;
    }
  }
  model->n = n;
  model->coords = (int(*)[2])aom_malloc(sizeof(*model->coords) * n);
  if (!model->coords) {
    aom_noise_model_free(model);
    return 0;
  }

  // Enumerate the causal neighborhood: rows above the current sample, plus
  // the samples to the left on the current row (y == 0 stops at x == -1).
  for (y = -lag; y <= 0; ++y) {
    const int max_x = y == 0 ? -1 : lag;
    for (x = -lag; x <= max_x; ++x) {
      switch (params.shape) {
        case AOM_NOISE_SHAPE_DIAMOND:
          // Diamond keeps only offsets within L1 distance of the origin.
          if (abs(x) <= y + lag) {
            model->coords[i][0] = x;
            model->coords[i][1] = y;
            ++i;
          }
          break;
        case AOM_NOISE_SHAPE_SQUARE:
          model->coords[i][0] = x;
          model->coords[i][1] = y;
          ++i;
          break;
        default:
          fprintf(stderr, "Invalid shape\n");
          aom_noise_model_free(model);
          return 0;
      }
    }
  }
  // The enumeration must produce exactly num_coeffs(params) coordinates.
  assert(i == n);
  return 1;
}
void aom_noise_model_free(aom_noise_model_t *model) { int c = 0; if (!model) return;
aom_free(model->coords); for (c = 0; c < 3; ++c) {
equation_system_free(&model->latest_state[c].eqns);
equation_system_free(&model->combined_state[c].eqns);
// Accumulate AR-model observations for channel c from every flat block.
// For each usable pixel, extract_ar_row_* fills `buffer` with the (possibly
// subsampled, luma-augmented) neighborhood values and returns the center
// value; both are normalized by the max sample value and folded into the
// least-squares system A x = b of the channel's latest state.
// Returns 1 on success, 0 if the scratch buffer cannot be allocated.
static int add_block_observations(
    aom_noise_model_t *noise_model, int c, const uint8_t *const data,
    const uint8_t *const denoised, int w, int h, int stride, int sub_log2[2],
    const uint8_t *const alt_data, const uint8_t *const alt_denoised,
    int alt_stride, const uint8_t *const flat_blocks, int block_size,
    int num_blocks_w, int num_blocks_h) {
  const int lag = noise_model->params.lag;
  const int num_coords = noise_model->n;
  const double normalization = (1 << noise_model->params.bit_depth) - 1;
  double *A = noise_model->latest_state[c].eqns.A;
  double *b = noise_model->latest_state[c].eqns.b;
  // +1 slot for the luma-correlation term used on chroma channels.
  double *buffer = (double *)aom_malloc(sizeof(*buffer) * (num_coords + 1));
  const int n = noise_model->latest_state[c].eqns.n;

  if (!buffer) {
    fprintf(stderr, "Unable to allocate buffer of size %d\n", num_coords + 1);
    return 0;
  }
  for (int by = 0; by < num_blocks_h; ++by) {
    const int y_o = by * (block_size >> sub_log2[1]);
    for (int bx = 0; bx < num_blocks_w; ++bx) {
      const int x_o = bx * (block_size >> sub_log2[0]);
      if (!flat_blocks[by * num_blocks_w + bx]) {
        continue;
      }
      // Skip a lag-wide margin at the top/left unless the neighboring block
      // is also flat (then its samples are valid AR context).
      int y_start =
          (by > 0 && flat_blocks[(by - 1) * num_blocks_w + bx]) ? 0 : lag;
      int x_start =
          (bx > 0 && flat_blocks[by * num_blocks_w + bx - 1]) ? 0 : lag;
      int y_end = AOMMIN((h >> sub_log2[1]) - by * (block_size >> sub_log2[1]),
                         block_size >> sub_log2[1]);
      // On the right edge, keep a lag margin unless the right neighbor is
      // flat; also never run past the (subsampled) frame width.
      int x_end = AOMMIN(
          (w >> sub_log2[0]) - bx * (block_size >> sub_log2[0]) - lag,
          (bx + 1 < num_blocks_w && flat_blocks[by * num_blocks_w + bx + 1])
              ? (block_size >> sub_log2[0])
              : ((block_size >> sub_log2[0]) - lag));
      for (int y = y_start; y < y_end; ++y) {
        for (int x = x_start; x < x_end; ++x) {
          const double val =
              noise_model->params.use_highbd
                  ? extract_ar_row_highbd(noise_model->coords, num_coords,
                                          (const uint16_t *const)data,
                                          (const uint16_t *const)denoised,
                                          stride, sub_log2,
                                          (const uint16_t *const)alt_data,
                                          (const uint16_t *const)alt_denoised,
                                          alt_stride, x + x_o, y + y_o, buffer)
                  : extract_ar_row_lowbd(noise_model->coords, num_coords, data,
                                         denoised, stride, sub_log2, alt_data,
                                         alt_denoised, alt_stride, x + x_o,
                                         y + y_o, buffer);
          // Rank-1 update of the normal equations with the extracted row.
          for (int i = 0; i < n; ++i) {
            for (int j = 0; j < n; ++j) {
              A[i * n + j] +=
                  (buffer[i] * buffer[j]) / (normalization * normalization);
            }
            b[i] += (buffer[i] * val) / (normalization * normalization);
          }
          noise_model->latest_state[c].num_observations++;
        }
      }
    }
  }
  aom_free(buffer);
  return 1;
}
// Feed per-block noise-strength measurements into channel c's strength
// solver. For each flat block with enough samples, measures the block mean
// (from alt_data, i.e. luma, when available) and the noise variance, removes
// the chroma component that is explained by correlation with luma, divides
// out the AR filter gain, and records the result as one measurement.
// Precondition: for c > 0 the luma solver/state (index 0) must already have
// been solved, since its fitted strength and ar_gain are read here.
static void add_noise_std_observations(
    aom_noise_model_t *noise_model, int c, const double *coeffs,
    const uint8_t *const data, const uint8_t *const denoised, int w, int h,
    int stride, int sub_log2[2], const uint8_t *const alt_data, int alt_stride,
    const uint8_t *const flat_blocks, int block_size, int num_blocks_w,
    int num_blocks_h) {
  const int num_coords = noise_model->n;
  aom_noise_strength_solver_t *noise_strength_solver =
      &noise_model->latest_state[c].strength_solver;

  const aom_noise_strength_solver_t *noise_strength_luma =
      &noise_model->latest_state[0].strength_solver;
  const double luma_gain = noise_model->latest_state[0].ar_gain;
  const double noise_gain = noise_model->latest_state[c].ar_gain;
  for (int by = 0; by < num_blocks_h; ++by) {
    const int y_o = by * (block_size >> sub_log2[1]);
    for (int bx = 0; bx < num_blocks_w; ++bx) {
      const int x_o = bx * (block_size >> sub_log2[0]);
      if (!flat_blocks[by * num_blocks_w + bx]) {
        continue;
      }
      // Number of (subsampled) samples actually inside the frame for this
      // block; edge blocks may be partial.
      const int num_samples_h =
          AOMMIN((h >> sub_log2[1]) - by * (block_size >> sub_log2[1]),
                 block_size >> sub_log2[1]);
      const int num_samples_w =
          AOMMIN((w >> sub_log2[0]) - bx * (block_size >> sub_log2[0]),
                 (block_size >> sub_log2[0]));
      // Make sure that we have a reasonable amount of samples to consider the
      // block
      if (num_samples_w * num_samples_h > block_size) {
        // Block mean is taken from luma (alt_data) when present, in full
        // resolution coordinates.
        const double block_mean = get_block_mean(
            alt_data ? alt_data : data, w, h, alt_data ? alt_stride : stride,
            x_o << sub_log2[0], y_o << sub_log2[1], block_size,
            noise_model->params.use_highbd);
        const double noise_var = get_noise_var(
            data, denoised, stride, w >> sub_log2[0], h >> sub_log2[1], x_o,
            y_o, block_size >> sub_log2[0], block_size >> sub_log2[1],
            noise_model->params.use_highbd);
        // We want to remove the part of the noise that came from being
        // correlated with luma. Note that the noise solver for luma must
        // have already been run.
        const double luma_strength =
            c > 0 ? luma_gain * noise_strength_solver_get_value(
                                    noise_strength_luma, block_mean)
                  : 0;
        const double corr = c > 0 ? coeffs[num_coords] : 0;
        // Chroma noise:
        //    N(0, noise_var) = N(0, uncorr_var) + corr * N(0, luma_strength^2)
        // The uncorrelated component:
        //   uncorr_var = noise_var - (corr * luma_strength)^2
        // But don't allow fully correlated noise (hence the max), since the
        // synthesis cannot model it.
        const double uncorr_std = sqrt(
            AOMMAX(noise_var / 16, noise_var - pow(corr * luma_strength, 2)));
        // After we've removed correlation with luma, undo the gain that will
        // come from running the IIR filter.
        const double adjusted_strength = uncorr_std / noise_gain;
        aom_noise_strength_solver_add_measurement(
            noise_strength_solver, block_mean, adjusted_strength);
      }
    }
  }
}
// Return true if the noise estimate appears to be different from the combined // (multi-frame) estimate. The difference is measured by checking whether the // AR coefficients have diverged (using a threshold on normalized cross // correlation), or whether the noise strength has changed. staticint is_noise_model_different(aom_noise_model_t *const noise_model) { // These thresholds are kind of arbitrary and will likely need further tuning // (or exported as parameters). The threshold on noise strength is a weighted // difference between the noise strength histograms constdouble kCoeffThreshold = 0.9; constdouble kStrengthThreshold =
0.005 * (1 << (noise_model->params.bit_depth - 8)); for (int c = 0; c < 1; ++c) { constdouble corr =
aom_normalized_cross_correlation(noise_model->latest_state[c].eqns.x,
noise_model->combined_state[c].eqns.x,
noise_model->combined_state[c].eqns.n); if (corr < kCoeffThreshold) return 1;
// Solve the AR coefficient system for one noise state and derive the AR
// filter gain (state->ar_gain) used later to normalize strength
// measurements. For chroma (is_chroma != 0), the final unknown is the
// luma-correlation term and is excluded from the variance/covariance sums.
// Returns the result of equation_system_solve; on failure ar_gain is reset
// to 1.0.
static int ar_equation_system_solve(aom_noise_state_t *state, int is_chroma) {
  const int ret = equation_system_solve(&state->eqns);
  state->ar_gain = 1.0;
  if (!ret) return ret;

  // Update the AR gain from the equation system as it will be used to fit
  // the noise strength as a function of intensity. In the Yule-Walker
  // equations, the diagonal should be the variance of the correlated noise.
  // In the case of the least squares estimate, there will be some variability
  // in the diagonal. So use the mean of the diagonal as the estimate of
  // overall variance (this works for least squares or Yule-Walker
  // formulation).
  double var = 0;
  const int n = state->eqns.n;
  for (int i = 0; i < (state->eqns.n - is_chroma); ++i) {
    // A accumulated raw sums; divide by observation count to get moments.
    var += state->eqns.A[i * n + i] / state->num_observations;
  }
  var /= (n - is_chroma);

  // Keep track of E(Y^2) = <b, x> + E(X^2)
  // In the case that we are using chroma and have an estimate of correlation
  // with luma we adjust that estimate slightly to remove the correlated bits
  // by subtracting out the last column of a scaled by our correlation
  // estimate from b. E(y^2) = <b - A(:, end)*x(end), x>
  double sum_covar = 0;
  for (int i = 0; i < state->eqns.n - is_chroma; ++i) {
    double bi = state->eqns.b[i];
    if (is_chroma) {
      bi -= state->eqns.A[i * n + (n - 1)] * state->eqns.x[n - 1];
    }
    sum_covar += (bi * state->eqns.x[i]) / state->num_observations;
  }
  // Now, get an estimate of the variance of uncorrelated noise signal and use
  // it to determine the gain of the AR filter.
  const double noise_var = AOMMAX(var - sum_covar, 1e-6);
  state->ar_gain = AOMMAX(1, sqrt(AOMMAX(var / noise_var, 1e-6)));
  return ret;
}
aom_noise_status_t aom_noise_model_update(
aom_noise_model_t *const noise_model, const uint8_t *const data[3], const uint8_t *const denoised[3], int w, int h, int stride[3], int chroma_sub_log2[2], const uint8_t *const flat_blocks, int block_size) { constint num_blocks_w = (w + block_size - 1) / block_size; constint num_blocks_h = (h + block_size - 1) / block_size; int y_model_different = 0; int num_blocks = 0; int i = 0, channel = 0;
if (block_size <= 1) {
fprintf(stderr, "block_size = %d must be > 1\n", block_size); return AOM_NOISE_STATUS_INVALID_ARGUMENT;
}
if (block_size < noise_model->params.lag * 2 + 1) {
fprintf(stderr, "block_size = %d must be >= %d\n", block_size,
noise_model->params.lag * 2 + 1); return AOM_NOISE_STATUS_INVALID_ARGUMENT;
}
// Clear the latest equation system for (i = 0; i < 3; ++i) {
equation_system_clear(&noise_model->latest_state[i].eqns);
noise_model->latest_state[i].num_observations = 0;
noise_strength_solver_clear(&noise_model->latest_state[i].strength_solver);
}
// Check that we have enough flat blocks for (i = 0; i < num_blocks_h * num_blocks_w; ++i) { if (flat_blocks[i]) {
num_blocks++;
}
}
if (num_blocks <= 1) {
fprintf(stderr, "Not enough flat blocks to update noise estimate\n"); return AOM_NOISE_STATUS_INSUFFICIENT_FLAT_BLOCKS;
}
// Convert the scaling functions to 8 bit values
aom_noise_strength_lut_t scaling_points[3]; if (!aom_noise_strength_solver_fit_piecewise(
&noise_model->combined_state[0].strength_solver, 14,
scaling_points + 0)) { return 0;
} if (!aom_noise_strength_solver_fit_piecewise(
&noise_model->combined_state[1].strength_solver, 10,
scaling_points + 1)) {
aom_noise_strength_lut_free(scaling_points + 0); return 0;
} if (!aom_noise_strength_solver_fit_piecewise(
&noise_model->combined_state[2].strength_solver, 10,
scaling_points + 2)) {
aom_noise_strength_lut_free(scaling_points + 0);
aom_noise_strength_lut_free(scaling_points + 1); return 0;
}
// Both the domain and the range of the scaling functions in the film_grain // are normalized to 8-bit (e.g., they are implicitly scaled during grain // synthesis). constdouble strength_divisor = 1 << (noise_model->params.bit_depth - 8); double max_scaling_value = 1e-4; for (int c = 0; c < 3; ++c) { for (int i = 0; i < scaling_points[c].num_points; ++i) {
scaling_points[c].points[i][0] =
AOMMIN(255, scaling_points[c].points[i][0] / strength_divisor);
scaling_points[c].points[i][1] =
AOMMIN(255, scaling_points[c].points[i][1] / strength_divisor);
max_scaling_value =
AOMMAX(scaling_points[c].points[i][1], max_scaling_value);
}
}
// Scaling_shift values are in the range [8,11] constint max_scaling_value_log2 =
clamp((int)floor(log2(max_scaling_value) + 1), 2, 5);
film_grain->scaling_shift = 5 + (8 - max_scaling_value_log2);
int(*film_grain_scaling[3])[2] = {
film_grain->scaling_points_y,
film_grain->scaling_points_cb,
film_grain->scaling_points_cr,
}; for (int c = 0; c < 3; c++) { for (int i = 0; i < scaling_points[c].num_points; ++i) {
film_grain_scaling[c][i][0] = (int)(scaling_points[c].points[i][0] + 0.5);
film_grain_scaling[c][i][1] = clamp(
(int)(scale_factor * scaling_points[c].points[i][1] + 0.5), 0, 255);
}
}
aom_noise_strength_lut_free(scaling_points + 0);
aom_noise_strength_lut_free(scaling_points + 1);
aom_noise_strength_lut_free(scaling_points + 2);
// Convert the ar_coeffs into 8-bit values constint n_coeff = noise_model->combined_state[0].eqns.n; double max_coeff = 1e-4, min_coeff = -1e-4; double y_corr[2] = { 0, 0 }; double avg_luma_strength = 0; for (int c = 0; c < 3; c++) {
aom_equation_system_t *eqns = &noise_model->combined_state[c].eqns; for (int i = 0; i < n_coeff; ++i) {
max_coeff = AOMMAX(max_coeff, eqns->x[i]);
min_coeff = AOMMIN(min_coeff, eqns->x[i]);
} // Since the correlation between luma/chroma was computed in an already // scaled space, we adjust it in the un-scaled space.
aom_noise_strength_solver_t *solver =
&noise_model->combined_state[c].strength_solver; // Compute a weighted average of the strength for the channel. double average_strength = 0, total_weight = 0; for (int i = 0; i < solver->eqns.n; ++i) { double w = 0; for (int j = 0; j < solver->eqns.n; ++j) {
w += solver->eqns.A[i * solver->eqns.n + j];
}
w = sqrt(w);
average_strength += solver->eqns.x[i] * w;
total_weight += w;
} if (total_weight == 0)
average_strength = 1; else
average_strength /= total_weight; if (c == 0) {
avg_luma_strength = average_strength;
} else {
y_corr[c - 1] = avg_luma_strength * eqns->x[n_coeff] / average_strength;
max_coeff = AOMMAX(max_coeff, y_corr[c - 1]);
min_coeff = AOMMIN(min_coeff, y_corr[c - 1]);
}
} // Shift value: AR coeffs range (values 6-9) // 6: [-2, 2), 7: [-1, 1), 8: [-0.5, 0.5), 9: [-0.25, 0.25)
film_grain->ar_coeff_shift =
clamp(7 - (int)AOMMAX(1 + floor(log2(max_coeff)), ceil(log2(-min_coeff))),
6, 9); double scale_ar_coeff = 1 << film_grain->ar_coeff_shift; int *ar_coeffs[3] = {
film_grain->ar_coeffs_y,
film_grain->ar_coeffs_cb,
film_grain->ar_coeffs_cr,
}; for (int c = 0; c < 3; ++c) {
aom_equation_system_t *eqns = &noise_model->combined_state[c].eqns; for (int i = 0; i < n_coeff; ++i) {
ar_coeffs[c][i] =
clamp((int)round(scale_ar_coeff * eqns->x[i]), -128, 127);
} if (c > 0) {
ar_coeffs[c][n_coeff] =
clamp((int)round(scale_ar_coeff * y_corr[c - 1]), -128, 127);
}
}
// At the moment, the noise modeling code assumes that the chroma scaling // functions are a function of luma.
film_grain->cb_mult = 128; // 8 bits
film_grain->cb_luma_mult = 192; // 8 bits
film_grain->cb_offset = 256; // 9 bits
init_success &= (tx_full != NULL) && (tx_chroma != NULL) && (plane != NULL) &&
(plane_d != NULL) && (block != NULL) && (block_d != NULL) &&
(window_full != NULL) && (window_chroma != NULL) &&
(result != NULL); for (int c = init_success ? 0 : 3; c < 3; ++c) { float *window_function = c == 0 ? window_full : window_chroma;
aom_flat_block_finder_t *block_finder = &block_finder_full; constint chroma_sub_h = c > 0 ? chroma_sub[1] : 0; constint chroma_sub_w = c > 0 ? chroma_sub[0] : 0; struct aom_noise_tx_t *tx =
(c > 0 && chroma_sub[0] > 0) ? tx_chroma : tx_full; if (!data[c] || !denoised[c]) continue; if (c > 0 && chroma_sub[0] != 0) {
block_finder = &block_finder_chroma;
}
memset(result, 0, sizeof(*result) * result_stride * result_height); // Do overlapped block processing (half overlapped). The block rows can // easily be done in parallel for (int offsy = 0; offsy < (block_size >> chroma_sub_h);
offsy += (block_size >> chroma_sub_h) / 2) { for (int offsx = 0; offsx < (block_size >> chroma_sub_w);
offsx += (block_size >> chroma_sub_w) / 2) { // Pad the boundary when processing each block-set. for (int by = -1; by < num_blocks_h; ++by) { for (int bx = -1; bx < num_blocks_w; ++bx) { constint pixels_per_block =
(block_size >> chroma_sub_w) * (block_size >> chroma_sub_h);
aom_flat_block_finder_extract_block(
block_finder, data[c], w >> chroma_sub_w, h >> chroma_sub_h,
stride[c], bx * (block_size >> chroma_sub_w) + offsx,
by * (block_size >> chroma_sub_h) + offsy, plane_d, block_d); for (int j = 0; j < pixels_per_block; ++j) {
block[j] = (float)block_d[j];
plane[j] = (float)plane_d[j];
}
pointwise_multiply(window_function, block, pixels_per_block);
aom_noise_tx_forward(tx, block);
aom_noise_tx_filter(tx, noise_psd[c]);
aom_noise_tx_inverse(tx, block);
// Apply window function to the plane approximation (we will apply // it to the sum of plane + block when composing the results).
pointwise_multiply(window_function, plane, pixels_per_block);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.