// Copyright (c) the JPEG XL Project Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Some of the floating point constants in this file and in other
// files in the libjxl project have been obtained using the
// tools/optimizer/simplex_fork.py tool. It is a variation of
// Nelder-Mead optimization, and we generally try to minimize the
// BPP * pnorm aggregate as reported by the benchmark_xl tool,
// but occasionally the values are optimized under additional
// constraints such as maintaining a certain density, or ratio of
// popularity of integral transforms. Jyrki visually reviews all
// such changes and often makes manual adjustments to maintain good
// visual quality in cases where butteraugli was not sufficiently
// sensitive to some kind of degradation. Unfortunately image quality
// is still more of an art than a science.
// Set JXL_DEBUG_AC_STRATEGY to 1 to enable debugging. #ifndef JXL_DEBUG_AC_STRATEGY #define JXL_DEBUG_AC_STRATEGY 0 #endif
// This must come before the begin/end_target, but HWY_ONCE is only true // after that, so use an "include guard". #ifndef LIB_JXL_ENC_AC_STRATEGY_ #define LIB_JXL_ENC_AC_STRATEGY_ // Parameters of the heuristic are marked with a OPTIMIZE comment. namespace jxl { namespace {
// These templates are not found via ADL. using hwy::HWY_NAMESPACE::AbsDiff; using hwy::HWY_NAMESPACE::Eq; using hwy::HWY_NAMESPACE::IfThenElseZero; using hwy::HWY_NAMESPACE::IfThenZeroElse; using hwy::HWY_NAMESPACE::Round; using hwy::HWY_NAMESPACE::Sqrt;
// Returns true when, within columns [start_x, end_x) of block-row `y`, some
// multi-block integral transform extends across the horizontal boundary
// above row `y` (i.e. a block in that range is covered by a transform whose
// first block lies on an earlier row). Used to reject merge candidates that
// would cut an existing transform in half.
bool MultiBlockTransformCrossesHorizontalBoundary(
    const AcStrategyImage& ac_strategy, size_t start_x, size_t y,
    size_t end_x) {
  if (start_x >= ac_strategy.xsize() || y >= ac_strategy.ysize()) {
    return false;
  }
  if (y % 8 == 0) {
    // Nothing crosses 64x64 boundaries, and the memory on the other side
    // of the 64x64 block may still be uninitialized.
    return false;
  }
  end_x = std::min(end_x, ac_strategy.xsize());
  // The first multiblock might be before the start_x; adjust start_x
  // to point to the first IsFirstBlock() == true block we find by backward
  // tracing (never crossing the 64x64-aligned column start_x & ~7).
  AcStrategyRow row = ac_strategy.ConstRow(y);
  const size_t start_x_limit = start_x & ~7;
  while (start_x != start_x_limit && !row[start_x].IsFirstBlock()) {
    --start_x;
  }
  // Walk transform-by-transform. Landing on a non-first block means the
  // covering transform started on an earlier row, so it crosses the
  // horizontal boundary.
  for (size_t x = start_x; x < end_x;) {
    if (row[x].IsFirstBlock()) {
      x += row[x].covered_blocks_x();
    } else {
      return true;
    }
  }
  return false;
}
// Vertical counterpart of MultiBlockTransformCrossesHorizontalBoundary:
// returns true when, within rows [start_y, end_y) of block-column `x`, some
// multi-block integral transform extends across the vertical boundary to the
// left of column `x` (a block there is covered by a transform whose first
// block lies in an earlier column).
bool MultiBlockTransformCrossesVerticalBoundary(
    const AcStrategyImage& ac_strategy, size_t x, size_t start_y,
    size_t end_y) {
  if (x >= ac_strategy.xsize() || start_y >= ac_strategy.ysize()) {
    return false;
  }
  if (x % 8 == 0) {
    // Nothing crosses 64x64 boundaries, and the memory on the other side
    // of the 64x64 block may still be uninitialized.
    return false;
  }
  end_y = std::min(end_y, ac_strategy.ysize());
  // The first multiblock might be before the start_y; adjust start_y
  // to point to the first IsFirstBlock() == true block we find by backward
  // tracing (never crossing the 64x64-aligned row start_y & ~7).
  const size_t start_y_limit = start_y & ~7;
  while (start_y != start_y_limit &&
         !ac_strategy.ConstRow(start_y)[x].IsFirstBlock()) {
    --start_y;
  }
  // Walk transform-by-transform down the column; a non-first block implies
  // the covering transform started in an earlier column, i.e. it crosses
  // the vertical boundary.
  for (size_t y = start_y; y < end_y;) {
    AcStrategyRow row = ac_strategy.ConstRow(y);
    if (row[x].IsFirstBlock()) {
      y += row[x].covered_blocks_y();
    } else {
      return true;
    }
  }
  return false;
}
// NOTE(review): this is an interior fragment of a larger entropy/estimation
// routine whose signature and remainder lie outside this chunk; code is left
// byte-identical (including the chunk's fused tokens), comments only.
// It appears to aggregate the per-8x8 config.Quant() field over the blocks
// covered by `acs`, then accumulate a per-channel quantization-error and
// coefficient-cost estimate -- TODO confirm against the full function.
const size_t num_blocks = acs.covered_blocks_x() * acs.covered_blocks_y(); // avoid large blocks when there is a lot going on in red-green. float quant_norm16 = 0; if (num_blocks == 1) { // When it is only one 8x8, we don't need aggregation of values.
quant_norm16 = config.Quant(x / 8, y / 8);
} elseif (num_blocks == 2) { // Taking max instead of 8th norm seems to work // better for smallest blocks up to 16x8. Jyrki couldn't get // improvements in trying the same for 16x16 blocks. if (acs.covered_blocks_y() == 2) {
quant_norm16 =
std::max(config.Quant(x / 8, y / 8), config.Quant(x / 8, y / 8 + 1));
} else {
quant_norm16 =
std::max(config.Quant(x / 8, y / 8), config.Quant(x / 8 + 1, y / 8));
}
// For larger transforms: a 16th-power norm of the per-8x8 Quant values,
// emphasizing the largest (most masking-relevant) values in the area.
} else { // Load QF value, calculate empirical heuristic on masking field // for weighting the information loss. Information loss manifests // itself as ringing, and masking could hide it. for (size_t iy = 0; iy < acs.covered_blocks_y(); iy++) { for (size_t ix = 0; ix < acs.covered_blocks_x(); ix++) { float qval = config.Quant(x / 8 + ix, y / 8 + iy);
qval *= qval;
qval *= qval;
qval *= qval;
// qval has been squared three times (qval^8); adding qval*qval
// accumulates qval^16, matching the 1/16 root taken below.
quant_norm16 += qval * qval;
}
}
quant_norm16 /= num_blocks;
quant_norm16 = FastPowf(quant_norm16, 1.0f / 16.0f);
} constauto quant = Set(df, quant_norm16);
// Per-channel loop: quantize (in - cmap_factor * in_y) with the aggregated
// quant scale, and accumulate a Sqrt-based coefficient cost plus a count of
// nonzero quantized coefficients (SIMD, Lanes(df) at a time).
auto loss = Zero(df8); for (size_t c = 0; c < 3; c++) { constfloat* inv_matrix = config.dequant->InvMatrix(acs.Strategy(), c); constfloat* matrix = config.dequant->Matrix(acs.Strategy(), c); constauto cmap_factor = Set(df, cmap_factors[c]);
auto entropy_v = Zero(df); auto nzeros_v = Zero(df); for (size_t i = 0; i < num_blocks * kDCTBlockSize; i += Lanes(df)) { constauto in = Load(df, block + c * size + i); constauto in_y = Mul(Load(df, block + size + i), cmap_factor); constauto im = Load(df, inv_matrix + i); constauto val = Mul(Sub(in, in_y), Mul(im, quant)); constauto rval = Round(val); constauto diff = Sub(val, rval); constauto m = Load(df, matrix + i);
Store(Mul(m, diff), df, &mem[i]); constauto q = Abs(rval); constauto q_is_zero = Eq(q, Zero(df)); // We used to have q * C here, but that cost model seems to // be punishing large values more than necessary. Sqrt tries // to avoid large values less aggressively.
entropy_v = Add(Sqrt(q), entropy_v);
nzeros_v = Add(nzeros_v, IfThenZeroElse(q_is_zero, Set(df, 1.0f)));
}
// The following function tries to merge smaller transforms into
// squares and the rectangles originating from a single middle division
// (horizontal or vertical) fairly.
//
// This is now generalized to concern squares of blocks X blocks size,
// where a block is 8x8 pixels.
//
// Considers, for the blocks X blocks square at block offset (cx, cy) inside
// the 64x64 tile at (bx, by): the full JxJ square transform, the two JxK
// (vertical-split) halves, and the two KxJ (horizontal-split) halves, and
// installs whichever choice has the lowest (entropy_mul-weighted) entropy
// estimate, updating `entropy_estimate` accordingly.
// Returns an error only if an EstimateEntropy call fails; returning true
// without changes means this location was unsuitable or no candidate won.
Status FindBestFirstLevelDivisionForSquare(
    size_t blocks, bool allow_square_transform, size_t bx, size_t by, size_t cx,
    size_t cy, const ACSConfig& config, const float* JXL_RESTRICT cmap_factors,
    AcStrategyImage* JXL_RESTRICT ac_strategy, const float entropy_mul_JXK,
    const float entropy_mul_JXJ, float* JXL_RESTRICT entropy_estimate,
    float* block, float* scratch_space, uint32_t* quantized) {
  // We denote J for the larger dimension here, and K for the smaller.
  // For example, for 32x32 block splitting, J would be 32, K 16.
  const size_t blocks_half = blocks / 2;
  const AcStrategyType acs_rawJXK = AcsVerticalSplit(blocks);
  const AcStrategyType acs_rawKXJ = AcsHorizontalSplit(blocks);
  const AcStrategyType acs_rawJXJ = AcsSquare(blocks);
  const AcStrategy acsJXK = AcStrategy::FromRawStrategy(acs_rawJXK);
  const AcStrategy acsKXJ = AcStrategy::FromRawStrategy(acs_rawKXJ);
  const AcStrategy acsJXJ = AcStrategy::FromRawStrategy(acs_rawJXJ);
  AcStrategyRow row0 = ac_strategy->ConstRow(by + cy + 0);
  AcStrategyRow row1 = ac_strategy->ConstRow(by + cy + blocks_half);
  // Let's check if we can consider a JXJ block here at all.
  // This is not necessary in the basic use of hierarchically merging
  // blocks in the simplest possible way, but is needed when we try other
  // 'floating' options of merging, possibly after a simple hierarchical
  // merge has been explored.
  if (MultiBlockTransformCrossesHorizontalBoundary(*ac_strategy, bx + cx,
                                                   by + cy, bx + cx + blocks) ||
      MultiBlockTransformCrossesHorizontalBoundary(
          *ac_strategy, bx + cx, by + cy + blocks, bx + cx + blocks) ||
      MultiBlockTransformCrossesVerticalBoundary(*ac_strategy, bx + cx, by + cy,
                                                 by + cy + blocks) ||
      MultiBlockTransformCrossesVerticalBoundary(*ac_strategy, bx + cx + blocks,
                                                 by + cy, by + cy + blocks)) {
    return true;  // not suitable for JxJ analysis, some transforms leak out.
  }
  // For floating transforms there may be already blocks selected that make
  // either or both JXK and KXJ not feasible for this location.
  const bool allow_JXK = !MultiBlockTransformCrossesVerticalBoundary(
      *ac_strategy, bx + cx + blocks_half, by + cy, by + cy + blocks);
  const bool allow_KXJ = !MultiBlockTransformCrossesHorizontalBoundary(
      *ac_strategy, bx + cx, by + cy + blocks_half, bx + cx + blocks);
  // Current entropies of the four quadrants, aggregated from the per-8x8
  // estimates of whatever transforms are currently selected.
  float entropy[2][2] = {};
  for (size_t dy = 0; dy < blocks; ++dy) {
    for (size_t dx = 0; dx < blocks; ++dx) {
      entropy[dy / blocks_half][dx / blocks_half] +=
          entropy_estimate[(cy + dy) * 8 + (cx + dx)];
    }
  }
  float entropy_JXK_left = std::numeric_limits<float>::max();
  float entropy_JXK_right = std::numeric_limits<float>::max();
  float entropy_KXJ_top = std::numeric_limits<float>::max();
  float entropy_KXJ_bottom = std::numeric_limits<float>::max();
  float entropy_JXJ = std::numeric_limits<float>::max();
  if (allow_JXK) {
    // Only estimate halves that are not already using the JXK transform.
    if (row0[bx + cx + 0].Strategy() != acs_rawJXK) {
      JXL_RETURN_IF_ERROR(EstimateEntropy(
          acsJXK, entropy_mul_JXK, (bx + cx + 0) * 8, (by + cy + 0) * 8, config,
          cmap_factors, block, scratch_space, quantized, entropy_JXK_left));
    }
    if (row0[bx + cx + blocks_half].Strategy() != acs_rawJXK) {
      JXL_RETURN_IF_ERROR(
          EstimateEntropy(acsJXK, entropy_mul_JXK, (bx + cx + blocks_half) * 8,
                          (by + cy + 0) * 8, config, cmap_factors, block,
                          scratch_space, quantized, entropy_JXK_right));
    }
  }
  if (allow_KXJ) {
    if (row0[bx + cx].Strategy() != acs_rawKXJ) {
      JXL_RETURN_IF_ERROR(EstimateEntropy(
          acsKXJ, entropy_mul_JXK, (bx + cx + 0) * 8, (by + cy + 0) * 8, config,
          cmap_factors, block, scratch_space, quantized, entropy_KXJ_top));
    }
    if (row1[bx + cx].Strategy() != acs_rawKXJ) {
      JXL_RETURN_IF_ERROR(
          EstimateEntropy(acsKXJ, entropy_mul_JXK, (bx + cx + 0) * 8,
                          (by + cy + blocks_half) * 8, config, cmap_factors,
                          block, scratch_space, quantized, entropy_KXJ_bottom));
    }
  }
  if (allow_square_transform) {
    // We control the exploration of the square transform separately so that
    // we can turn it off at high decoding speeds for 32x32, but still allow
    // exploring 16x32 and 32x16.
    JXL_RETURN_IF_ERROR(EstimateEntropy(
        acsJXJ, entropy_mul_JXJ, (bx + cx + 0) * 8, (by + cy + 0) * 8, config,
        cmap_factors, block, scratch_space, quantized, entropy_JXJ));
  }
  // Test if this block should have JXK or KXJ transforms,
  // because it can have only one or the other.
  float costJxN = std::min(entropy_JXK_left, entropy[0][0] + entropy[1][0]) +
                  std::min(entropy_JXK_right, entropy[0][1] + entropy[1][1]);
  float costNxJ = std::min(entropy_KXJ_top, entropy[0][0] + entropy[0][1]) +
                  std::min(entropy_KXJ_bottom, entropy[1][0] + entropy[1][1]);
  if (entropy_JXJ < costJxN && entropy_JXJ < costNxJ) {
    JXL_RETURN_IF_ERROR(ac_strategy->Set(bx + cx, by + cy, acs_rawJXJ));
    SetEntropyForTransform(cx, cy, acs_rawJXJ, entropy_JXJ, entropy_estimate);
  } else if (costJxN < costNxJ) {
    // Vertical split wins overall; install each JXK half only where it beats
    // the current contents of that half.
    if (entropy_JXK_left < entropy[0][0] + entropy[1][0]) {
      JXL_RETURN_IF_ERROR(ac_strategy->Set(bx + cx, by + cy, acs_rawJXK));
      SetEntropyForTransform(cx, cy, acs_rawJXK, entropy_JXK_left,
                             entropy_estimate);
    }
    if (entropy_JXK_right < entropy[0][1] + entropy[1][1]) {
      JXL_RETURN_IF_ERROR(
          ac_strategy->Set(bx + cx + blocks_half, by + cy, acs_rawJXK));
      SetEntropyForTransform(cx + blocks_half, cy, acs_rawJXK,
                             entropy_JXK_right, entropy_estimate);
    }
  } else {
    // Horizontal split wins overall; same per-half comparison as above.
    if (entropy_KXJ_top < entropy[0][0] + entropy[0][1]) {
      JXL_RETURN_IF_ERROR(ac_strategy->Set(bx + cx, by + cy, acs_rawKXJ));
      SetEntropyForTransform(cx, cy, acs_rawKXJ, entropy_KXJ_top,
                             entropy_estimate);
    }
    if (entropy_KXJ_bottom < entropy[1][0] + entropy[1][1]) {
      JXL_RETURN_IF_ERROR(
          ac_strategy->Set(bx + cx, by + cy + blocks_half, acs_rawKXJ));
      SetEntropyForTransform(cx, cy + blocks_half, acs_rawKXJ,
                             entropy_KXJ_bottom, entropy_estimate);
    }
  }
  return true;
}
// Chooses the AC strategy (integral transform sizes) for one 64x64 tile
// (`rect` is in block units, at most 8x8 blocks). Finds the best 8x8
// transform per block, then greedily merges into larger transforms, and
// finally tries non-aligned square divisions at slow speed tiers.
Status ProcessRectACS(const CompressParams& cparams, const ACSConfig& config,
                      const Rect& rect, const ColorCorrelationMap& cmap,
                      float* JXL_RESTRICT block,
                      uint32_t* JXL_RESTRICT quantized,
                      AcStrategyImage* ac_strategy) {
  // Main philosophy here:
  // 1. First find best 8x8 transform for each area.
  // 2. Merging them into larger transforms where possible, but
  //    starting from the smallest transforms (16x8 and 8x16).
  //    Additional complication: 16x8 and 8x16 are considered
  //    simultaneously and fairly against each other.
  // We are looking at 64x64 squares since the Y-to-X and Y-to-B
  // maps happen to be at that resolution, and having
  // integral transforms cross these boundaries leads to
  // additional complications.
  const float butteraugli_target = cparams.butteraugli_distance;
  float* JXL_RESTRICT scratch_space = block + 3 * AcStrategy::kMaxCoeffArea;
  size_t bx = rect.x0();
  size_t by = rect.y0();
  JXL_ENSURE(rect.xsize() <= 8);
  JXL_ENSURE(rect.ysize() <= 8);
  size_t tx = bx / kColorTileDimInBlocks;
  size_t ty = by / kColorTileDimInBlocks;
  const float cmap_factors[3] = {
      cmap.base().YtoXRatio(cmap.ytox_map.ConstRow(ty)[tx]),
      0.0f,
      cmap.base().YtoBRatio(cmap.ytob_map.ConstRow(ty)[tx]),
  };
  if (cparams.speed_tier > SpeedTier::kHare) return true;
  // First compute the best 8x8 transform for each square. Later, we do not
  // experiment with different combinations, but only use the best of the 8x8s
  // when DCT8X8 is specified in the tree search.
  // 8x8 transforms have 10 variants, but every larger transform is just a DCT.
  float entropy_estimate[64] = {};
  // Favor all 8x8 transforms (against 16x8 and larger transforms) at
  // low butteraugli_target distances.
  static const float k8x8mul1 = -0.4;
  static const float k8x8mul2 = 1.0;
  static const float k8x8base = 1.4;
  const float mul8x8 = k8x8mul2 + k8x8mul1 / (butteraugli_target + k8x8base);
  for (size_t iy = 0; iy < rect.ysize(); iy++) {
    for (size_t ix = 0; ix < rect.xsize(); ix++) {
      float entropy = 0.0;
      AcStrategyType best_of_8x8s;
      JXL_RETURN_IF_ERROR(FindBest8x8Transform(
          8 * (bx + ix), 8 * (by + iy), static_cast<int>(cparams.speed_tier),
          butteraugli_target, config, cmap_factors, ac_strategy, block,
          scratch_space, quantized, &entropy, best_of_8x8s));
      JXL_RETURN_IF_ERROR(ac_strategy->Set(bx + ix, by + iy, best_of_8x8s));
      entropy_estimate[iy * 8 + ix] = entropy * mul8x8;
    }
  }
  // Merge when a larger transform is better than the previously
  // searched best combination of 8x8 transforms.
  struct MergeTry {
    AcStrategyType type;
    uint8_t priority;
    uint8_t decoding_speed_tier_max_limit;
    uint8_t encoding_speed_tier_max_limit;
    float entropy_mul;
  };
  // These numbers need to be figured out manually and looking at
  // ringing next to sky etc. Optimization will find smaller numbers
  // and produce more ringing than is ideal. Larger numbers will
  // help stop ringing.
  const float entropy_mul16X8 = 1.25;
  const float entropy_mul16X16 = 1.35;
  const float entropy_mul16X32 = 1.5;
  const float entropy_mul32X32 = 1.5;
  const float entropy_mul64X32 = 2.26;
  const float entropy_mul64X64 = 2.26;
  // TODO(jyrki): Consider this feedback in further changes:
  // Also effectively when the multipliers for smaller blocks are
  // below 1, this raises the bar for the bigger blocks even higher
  // in that sense these constants are not independent (e.g. changing
  // the constant for DCT16x32 by -5% (making it more likely) also
  // means that DCT32x32 becomes harder to do when starting from
  // two DCT16x32s). It might be better to make them more independent,
  // e.g. by not applying the multiplier when storing the new entropy
  // estimates in TryMergeToACSCandidate().
  //
  // NOTE(review): declared size 9 but only 6 initializers remain after the
  // DCT16X16/DCT32X32/DCT64X64 entries were commented out; the trailing
  // three elements are value-initialized (type 0, entropy_mul 0) and still
  // iterated by the loop below -- verify this is intentional.
  const MergeTry kTransformsForMerge[9] = {
      {AcStrategyType::DCT16X8, 2, 4, 5, entropy_mul16X8},
      {AcStrategyType::DCT8X16, 2, 4, 5, entropy_mul16X8},
      // FindBestFirstLevelDivisionForSquare looks for DCT16X16 and its
      // subdivisions. {AcStrategyType::DCT16X16, 3, entropy_mul16X16},
      {AcStrategyType::DCT16X32, 4, 4, 4, entropy_mul16X32},
      {AcStrategyType::DCT32X16, 4, 4, 4, entropy_mul16X32},
      // FindBestFirstLevelDivisionForSquare looks for DCT32X32 and its
      // subdivisions. {AcStrategyType::DCT32X32, 5, 1, 5,
      // 0.9822994906548809f},
      {AcStrategyType::DCT64X32, 6, 1, 3, entropy_mul64X32},
      {AcStrategyType::DCT32X64, 6, 1, 3, entropy_mul64X32},
      // {AcStrategyType::DCT64X64, 8, 1, 3, 2.0846542128012948f},
  };
  /* These sizes not yet included in merge heuristic:
     set(AcStrategyType::DCT32X8, 0.0f, 2.261390410971102f);
     set(AcStrategyType::DCT8X32, 0.0f, 2.261390410971102f);
     set(AcStrategyType::DCT128X128, 0.0f, 1.0f);
     set(AcStrategyType::DCT128X64, 0.0f, 0.73f);
     set(AcStrategyType::DCT64X128, 0.0f, 0.73f);
     set(AcStrategyType::DCT256X256, 0.0f, 1.0f);
     set(AcStrategyType::DCT256X128, 0.0f, 0.73f);
     set(AcStrategyType::DCT128X256, 0.0f, 0.73f);
  */
  // Priority is a tricky kludge to avoid collisions so that transforms
  // don't overlap.
  uint8_t priority[64] = {};
  bool enable_32x32 = cparams.decoding_speed_tier < 4;
  for (auto tx : kTransformsForMerge) {
    if (tx.decoding_speed_tier_max_limit < cparams.decoding_speed_tier) {
      continue;
    }
    AcStrategy acs = AcStrategy::FromRawStrategy(tx.type);
    for (size_t cy = 0; cy + acs.covered_blocks_y() - 1 < rect.ysize();
         cy += acs.covered_blocks_y()) {
      for (size_t cx = 0; cx + acs.covered_blocks_x() - 1 < rect.xsize();
           cx += acs.covered_blocks_x()) {
        // 8-block (64x64) level: handled fairly via the square-division
        // search, triggered on the DCT32X64 pass.
        if (cy + 7 < rect.ysize() && cx + 7 < rect.xsize()) {
          if (cparams.decoding_speed_tier < 4 &&
              tx.type == AcStrategyType::DCT32X64) {
            // We handle both DCT32X64 and DCT64X32 at the same time.
            if ((cy | cx) % 8 == 0) {
              JXL_RETURN_IF_ERROR(FindBestFirstLevelDivisionForSquare(
                  8, true, bx, by, cx, cy, config, cmap_factors, ac_strategy,
                  tx.entropy_mul, entropy_mul64X64, entropy_estimate, block,
                  scratch_space, quantized));
            }
            continue;
          } else if (tx.type == AcStrategyType::DCT64X32) {
            // BUG FIX(review): was DCT32X16, which belongs to the 4-block
            // (32x32) level handled below; the sibling of DCT32X64 at this
            // level is DCT64X32, mirroring the DCT16X8/DCT32X16 patterns.
            // We handled both DCT32X64 and DCT64X32 at the same time,
            // and that is above. The last column and last row,
            // when the last column or last row is odd numbered,
            // are still handled by TryMergeAcs.
            continue;
          }
        }
        // BUG FIX(review): this skip used the 4-block "% 4" test (duplicated
        // verbatim below); the 8-block level requires the "% 8" test for the
        // DCT32X64/DCT64X32 pair, matching the 8 -> %8, 4 -> %4, 2 -> %2
        // pattern of this loop.
        if ((tx.type == AcStrategyType::DCT32X64 && cy % 8 != 0) ||
            (tx.type == AcStrategyType::DCT64X32 && cx % 8 != 0)) {
          // already covered by FindBest64X64
          continue;
        }
        // 4-block (32x32) level, triggered on the DCT16X32 pass.
        if (cy + 3 < rect.ysize() && cx + 3 < rect.xsize()) {
          if (tx.type == AcStrategyType::DCT16X32) {
            // We handle both DCT16X32 and DCT32X16 at the same time.
            if ((cy | cx) % 4 == 0) {
              JXL_RETURN_IF_ERROR(FindBestFirstLevelDivisionForSquare(
                  4, enable_32x32, bx, by, cx, cy, config, cmap_factors,
                  ac_strategy, tx.entropy_mul, entropy_mul32X32,
                  entropy_estimate, block, scratch_space, quantized));
            }
            continue;
          } else if (tx.type == AcStrategyType::DCT32X16) {
            // We handled both DCT16X32 and DCT32X16 at the same time,
            // and that is above. The last column and last row,
            // when the last column or last row is odd numbered,
            // are still handled by TryMergeAcs.
            continue;
          }
        }
        if ((tx.type == AcStrategyType::DCT16X32 && cy % 4 != 0) ||
            (tx.type == AcStrategyType::DCT32X16 && cx % 4 != 0)) {
          // already covered by FindBest32X32
          continue;
        }
        // 2-block (16x16) level, triggered on the DCT8X16 pass.
        if (cy + 1 < rect.ysize() && cx + 1 < rect.xsize()) {
          if (tx.type == AcStrategyType::DCT8X16) {
            // We handle both DCT8X16 and DCT16X8 at the same time.
            if ((cy | cx) % 2 == 0) {
              JXL_RETURN_IF_ERROR(FindBestFirstLevelDivisionForSquare(
                  2, true, bx, by, cx, cy, config, cmap_factors, ac_strategy,
                  tx.entropy_mul, entropy_mul16X16, entropy_estimate, block,
                  scratch_space, quantized));
            }
            continue;
          } else if (tx.type == AcStrategyType::DCT16X8) {
            // We handled both DCT8X16 and DCT16X8 at the same time,
            // and that is above. The last column and last row,
            // when the last column or last row is odd numbered,
            // are still handled by TryMergeAcs.
            continue;
          }
        }
        if ((tx.type == AcStrategyType::DCT8X16 && cy % 2 == 1) ||
            (tx.type == AcStrategyType::DCT16X8 && cx % 2 == 1)) {
          // already covered by FindBestFirstLevelDivisionForSquare
          continue;
        }
        // All other merge sizes are handled here.
        // Some of the DCT16X8s and DCT8X16s will still leak through here
        // when there is an odd number of 8x8 blocks, then the last row
        // and column will get their DCT16X8s and DCT8X16s through the
        // normal integral transform merging process.
        JXL_RETURN_IF_ERROR(
            TryMergeAcs(tx.type, bx, by, cx, cy, config, cmap_factors,
                        ac_strategy, tx.entropy_mul, tx.priority, &priority[0],
                        entropy_estimate, block, scratch_space, quantized));
      }
    }
  }
  if (cparams.speed_tier >= SpeedTier::kHare) {
    return true;
  }
  // Here we still try to do some non-aligned matching, find a few more
  // 16X8, 8X16 and 16X16s between the non-2-aligned blocks.
  for (size_t cy = 0; cy + 1 < rect.ysize(); ++cy) {
    for (size_t cx = 0; cx + 1 < rect.xsize(); ++cx) {
      if ((cy | cx) % 2 != 0) {
        JXL_RETURN_IF_ERROR(FindBestFirstLevelDivisionForSquare(
            2, true, bx, by, cx, cy, config, cmap_factors, ac_strategy,
            entropy_mul16X8, entropy_mul16X16, entropy_estimate, block,
            scratch_space, quantized));
      }
    }
  }
  // Non-aligned matching for 32X32, 16X32 and 32X16.
  size_t step = cparams.speed_tier >= SpeedTier::kTortoise ? 2 : 1;
  for (size_t cy = 0; cy + 3 < rect.ysize(); cy += step) {
    for (size_t cx = 0; cx + 3 < rect.xsize(); cx += step) {
      if ((cy | cx) % 4 == 0) {
        continue;  // Already tried with loop above (DCT16X32 case).
      }
      JXL_RETURN_IF_ERROR(FindBestFirstLevelDivisionForSquare(
          4, enable_32x32, bx, by, cx, cy, config, cmap_factors, ac_strategy,
          entropy_mul16X32, entropy_mul32X32, entropy_estimate, block,
          scratch_space, quantized));
    }
  }
  return true;
}
// Entropy estimate is composed of two factors:
// - estimate of the number of bits that will be used by the block
// - information loss due to quantization
// The following constant controls the relative weights of these components.
// NOTE(review): fragment -- these assignments configure a `config` object
// inside a function whose signature lies outside this chunk; presumably an
// ACSConfig set up before per-tile processing -- TODO confirm.
config.info_loss_multiplier = 1.2;
// Weights for the nonzero-coefficient count and per-coefficient cost terms;
// values look optimizer-derived (see the file header note on simplex_fork.py).
config.zeros_mul = 9.3089059022677905;
config.cost_delta = 10.833273317067883;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.