// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */
/* ZSTD_ldm_gear_init():
 *
 * Initializes the rolling hash state such that it will honor the
 * settings in params. */
static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
{
    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
    unsigned hashRateLog = params->hashRateLog;

    state->rolling = ~(U32)0;

    /* The choice of the splitting criterion is subject to two conditions:
     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
     *   2. ideally, it has to depend on a window of minMatchLength bytes.
     *
     * In the gear hash algorithm, bit n depends on the last n bytes;
     * so in order to obtain a good quality splitting criterion it is
     * preferable to use bits with high weight.
     *
     * To match condition 1 we use a mask with hashRateLog bits set
     * and, because of the previous remark, we make sure these bits
     * have the highest possible weight while still respecting
     * condition 2.
     */
    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
    } else {
        /* In this degenerate case we simply honor the hash rate. */
        state->stopMask = ((U64)1 << hashRateLog) - 1;
    }
}
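
/* Worked example (illustrative, not part of the original source; assumes
 * the typical defaults minMatchLength = 64 and hashRateLog = 7):
 * maxBitsInMask = MIN(64, 64) = 64, and
 *
 *   stopMask = ((U64)0x7F) << (64 - 7) = 0xFE00000000000000
 *
 * so a split triggers whenever the top 7 bits of the rolling hash are all
 * zero, i.e. on average once every 2^7 = 128 bytes, per condition 1 above. */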
/* ZSTD_ldm_gear_reset()
 * Feeds [data, data + minMatchLength) into the hash without registering any
 * splits. This effectively resets the hash state. This is used when skipping
 * over data, either at the beginning of a block, or skipping sections.
 */
static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
                                BYTE const* data, size_t minMatchLength)
{
    U64 hash = state->rolling;
    size_t n = 0;

#define GEAR_ITER_ONCE() do {                                  \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1;                                                \
    } while (0)
    while (n + 3 < minMatchLength) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    while (n < minMatchLength) {
        GEAR_ITER_ONCE();
    }
#undef GEAR_ITER_ONCE
}
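
/* Usage sketch (illustrative; mirrors the call sites in
 * ZSTD_ldm_generateSequences_internal below): after skipping ahead to
 * `anchor`, the hash is re-primed on the minMatchLength bytes that
 * precede the next scanned position:
 *
 *   ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
 */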
/* ZSTD_ldm_gear_feed():
 *
 * Registers in the splits array all the split points found in the first
 * size bytes following the data pointer. This function terminates when
 * either all the data has been processed or LDM_BATCH_SIZE splits are
 * present in the splits array.
 *
 * Precondition: The splits array must not be full.
 * Returns: The number of bytes processed. */
static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
                                 BYTE const* data, size_t size,
                                 size_t* splits, unsigned* numSplits)
{
    size_t n;
    U64 hash, mask;

    hash = state->rolling;
    mask = state->stopMask;
    n = 0;

#define GEAR_ITER_ONCE() do {                                  \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1;                                                \
        if (UNLIKELY((hash & mask) == 0)) {                    \
            splits[*numSplits] = n;                            \
            *numSplits += 1;                                   \
            if (*numSplits == LDM_BATCH_SIZE)                  \
                goto done;                                     \
        }                                                      \
    } while (0)

    while (n + 3 < size) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    while (n < size) {
        GEAR_ITER_ONCE();
    }
#undef GEAR_ITER_ONCE
done:
    state->rolling = hash;
    return n;
}
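
/* Usage sketch (illustrative, not part of the original source; variable
 * names are hypothetical): the caller drains the splits array between
 * calls, so the "not full" precondition always holds on entry:
 *
 *   size_t splits[LDM_BATCH_SIZE];
 *   unsigned numSplits = 0;
 *   while (ip < iend) {
 *       size_t const hashed = ZSTD_ldm_gear_feed(&hashState, ip,
 *                                 (size_t)(iend - ip), splits, &numSplits);
 *       ... handle splits[0 .. numSplits) ...
 *       numSplits = 0;
 *       ip += hashed;
 *   }
 */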
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               const ZSTD_compressionParameters* cParams)
{
    params->windowLog = cParams->windowLog;
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
    if (params->hashRateLog == 0) {
        if (params->hashLog > 0) {
            /* if params->hashLog is set, derive hashRateLog from it */
            assert(params->hashLog <= ZSTD_HASHLOG_MAX);
            if (params->windowLog > params->hashLog) {
                params->hashRateLog = params->windowLog - params->hashLog;
            }
        } else {
            assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9);
            /* mapping from [fast, rate7] to [btultra2, rate4] */
            params->hashRateLog = 7 - (cParams->strategy/3);
        }
    }
    if (params->hashLog == 0) {
        params->hashLog = BOUNDED(ZSTD_HASHLOG_MIN, params->windowLog - params->hashRateLog, ZSTD_HASHLOG_MAX);
    }
    if (params->minMatchLength == 0) {
        params->minMatchLength = LDM_MIN_MATCH_LENGTH;
        if (cParams->strategy >= ZSTD_btultra)
            params->minMatchLength /= 2;
    }
    if (params->bucketSizeLog == 0) {
        assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9);
        params->bucketSizeLog = BOUNDED(LDM_BUCKET_SIZE_LOG, (U32)cParams->strategy, ZSTD_LDM_BUCKETSIZELOG_MAX);
    }
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}
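
/* Worked example (illustrative; assumes the usual constants
 * LDM_MIN_MATCH_LENGTH == 64, LDM_BUCKET_SIZE_LOG == 3,
 * ZSTD_LDM_BUCKETSIZELOG_MAX == 8, ZSTD_HASHLOG_MIN == 6):
 * with windowLog = 27, strategy = ZSTD_btultra2 (9), and all LDM
 * parameters left at 0:
 *
 *   hashRateLog    = 7 - 9/3                             = 4
 *   hashLog        = BOUNDED(6, 27 - 4, ZSTD_HASHLOG_MAX) = 23
 *   minMatchLength = 64 / 2                              = 32  (strategy >= ZSTD_btultra)
 *   bucketSizeLog  = BOUNDED(3, 9, 8) = 8, then MIN(8, 23) = 8
 */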
/* ZSTD_ldm_countBackwardsMatch() :
 * Returns the number of bytes that match backwards before pIn and pMatch.
 *
 * We count only bytes where pMatch >= pMatchBase and pIn >= pAnchor. */
static size_t ZSTD_ldm_countBackwardsMatch(
        const BYTE* pIn, const BYTE* pAnchor,
        const BYTE* pMatch, const BYTE* pMatchBase)
{
    size_t matchLength = 0;
    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
        pIn--;
        pMatch--;
        matchLength++;
    }
    return matchLength;
}
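
/* Illustrative example (hypothetical buffers): if the 3 bytes before pIn
 * and the 3 bytes before pMatch are both "abc", and neither pAnchor nor
 * pMatchBase is reached first, the function returns 3. */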
/* ZSTD_ldm_countBackwardsMatch_2segments() :
 * Returns the number of bytes that match backwards from pMatch,
 * even with the backwards match spanning 2 different segments.
 *
 * On reaching `pMatchBase`, continue counting backwards from `pExtDictEnd` */
static size_t ZSTD_ldm_countBackwardsMatch_2segments(
        const BYTE* pIn, const BYTE* pAnchor,
        const BYTE* pMatch, const BYTE* pMatchBase,
        const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
{
    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
        /* If backwards match is entirely in the extDict or prefix, immediately return */
        return matchLength;
    }
    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
    return matchLength;
}
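
/* Illustrative layout (not from the original source): the external
 * dictionary and the prefix are two separate segments,
 *
 *   extDict: [pExtDictStart ... pExtDictEnd)    prefix: [pMatchBase ...)
 *
 * When the backwards scan from pMatch hits pMatchBase, counting resumes
 * at pExtDictEnd, so the reported length may span both segments. */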
/* ZSTD_ldm_fillFastTables() :
 *
 * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
 * This is similar to ZSTD_loadDictionaryContent.
 *
 * The tables for the other strategies are filled within their
 * block compressors. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms, void const* end)
{
    const BYTE* const iend = (const BYTE*)end;

    switch(ms->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
        break;

    case ZSTD_dfast:
#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
#else
        assert(0); /* shouldn't be called: cparams should've been adjusted. */
#endif
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
    case ZSTD_btlazy2:
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        break;
    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    return 0;
}
/* ZSTD_ldm_limitTableUpdate() :
 *
 * Sets cctx->nextToUpdate to a position corresponding closer to anchor
 * if it is far away
 * (after a long match, only update tables a limited amount). */
static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, const BYTE* anchor)
{
    U32 const curr = (U32)(anchor - ms->window.base);
    if (curr > ms->nextToUpdate + 1024) {
        ms->nextToUpdate =
            curr - MIN(512, curr - ms->nextToUpdate - 1024);
    }
}
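
/* Worked example (illustrative): with curr = 10000 and
 * ms->nextToUpdate = 1000, the gap of 9000 exceeds 1024, so nextToUpdate
 * becomes 10000 - MIN(512, 10000 - 1000 - 1024) = 9488: at most 512
 * positions before the anchor remain to be inserted into the tables. */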
    if (srcSize < minMatchLength)
        return iend - anchor;

    /* Initialize the rolling hash state with the first minMatchLength bytes */
    ZSTD_ldm_gear_init(&hashState, params);
    ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
    ip += minMatchLength;

            /* If a split point would generate a sequence overlapping with
             * the previous one, we merely register it in the hash table and
             * move on */
            if (split < anchor) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
                continue;
            }

            /* No match found -- insert an entry into the hash table
             * and process the next candidate match */
            if (bestEntry == NULL) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
                continue;
            }

                /* Out of sequence storage */
                if (rawSeqStore->size == rawSeqStore->capacity)
                    return ERROR(dstSize_tooSmall);
                seq->litLength = (U32)(split - backwardMatchLength - anchor);
                seq->matchLength = (U32)mLength;
                seq->offset = offset;
                rawSeqStore->size++;
            }

            /* Insert the current entry into the hash table --- it must be
             * done after the previous block to avoid clobbering bestEntry */
            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);

            anchor = split + forwardMatchLength;

            /* If we find a match that ends after the data that we've hashed
             * then we have a repeating, overlapping, pattern. E.g. all zeros.
             * If one repetition of the pattern matches our `stopMask` then all
             * repetitions will. We don't need to insert them all into our table,
             * only the first one. So skip over overlapping matches.
             * This is a major speed boost (20x) for compressing a single byte
             * repeated, when that byte ends up in the table.
             */
            if (anchor > ip + hashed) {
                ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
                /* Continue the outer loop at anchor (ip + hashed == anchor). */
                ip = anchor - hashed;
                break;
            }
        }

        ip += hashed;
    }

    return iend - anchor;
}
/*! ZSTD_ldm_reduceTable() :
 *  reduce table indexes by `reducerValue` */
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
                                 U32 const reducerValue)
{
    U32 u;
    for (u = 0; u < size; u++) {
        if (table[u].offset < reducerValue) table[u].offset = 0;
        else table[u].offset -= reducerValue;
    }
}
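
/* Illustrative example: with reducerValue = 1000, an entry at offset 1500
 * becomes 500, while an entry at offset 800 becomes 0 (invalidated). */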
    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
    /* Check that ZSTD_window_update() has been called for this chunk prior
     * to passing it to this function.
     */
    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
    /* The input could be very large (in zstdmt), so it must be broken up into
     * chunks to enforce the maximum distance and handle overflow correction.
     */
    assert(sequences->pos <= sequences->size);
    assert(sequences->size <= sequences->capacity);
    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
        size_t const remaining = (size_t)(iend - chunkStart);
        BYTE const *const chunkEnd =
            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
        size_t const chunkSize = chunkEnd - chunkStart;
        size_t newLeftoverSize;
        size_t const prevSize = sequences->size;

        assert(chunkStart < iend);
        /* 1. Perform overflow correction if necessary. */
        if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
            U32 const ldmHSize = 1U << params->hashLog;
            U32 const correction = ZSTD_window_correctOverflow(
                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
            /* invalidate dictionaries on overflow correction */
            ldmState->loadedDictEnd = 0;
        }
        /* 2. We enforce the maximum offset allowed.
         *
         * kMaxChunkSize should be small enough that we don't lose too much of
         * the window through early invalidation.
         * TODO: * Test the chunk size.
         *       * Try invalidation after the sequence generation and test the
         *         offset against maxDist directly.
         *
         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
         * that any offset used is valid at the END of the sequence, since it may
         * be split into two sequences. This condition holds when using
         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
         * against maxDist directly, we'll have to carefully handle that case.
         */
        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
            ldmState, sequences, params, chunkStart, chunkSize);
        if (ZSTD_isError(newLeftoverSize))
            return newLeftoverSize;
        /* 4. We add the leftover literals from previous iterations to the first
         * newly generated sequence, or add the `newLeftoverSize` if none are
         * generated.
         */
        /* Prepend the leftover literals from the last call */
        if (prevSize < sequences->size) {
            sequences->seq[prevSize].litLength += (U32)leftoverSize;
            leftoverSize = newLeftoverSize;
        } else {
            assert(newLeftoverSize == chunkSize);
            leftoverSize += chunkSize;
        }
    }
    return 0;
}
void
ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
{
    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
        if (srcSize <= seq->litLength) {
            /* Skip past srcSize literals */
            seq->litLength -= (U32)srcSize;
            return;
        }
        srcSize -= seq->litLength;
        seq->litLength = 0;
        if (srcSize < seq->matchLength) {
            /* Skip past the first srcSize of the match */
            seq->matchLength -= (U32)srcSize;
            if (seq->matchLength < minMatch) {
                /* The match is too short, omit it */
                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
                    seq[1].litLength += seq[0].matchLength;
                }
                rawSeqStore->pos++;
            }
            return;
        }
        srcSize -= seq->matchLength;
        seq->matchLength = 0;
        rawSeqStore->pos++;
    }
}
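
/* Worked example (illustrative values): skipping srcSize = 15 over a
 * sequence {litLength = 10, matchLength = 20} with minMatch = 32 first
 * consumes the 10 literals, then shortens the match to 20 - 5 = 15;
 * since 15 < minMatch, the remainder is folded into the next sequence's
 * litLength and the sequence is dropped (rawSeqStore->pos advances). */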
/*
 * If the sequence length is longer than remaining then the sequence is split
 * between this block and the next.
 *
 * Returns the current sequence to handle, or if the rest of the block should
 * be literals, it returns a sequence with offset == 0.
 */
static rawSeq maybeSplitSequence(RawSeqStore_t* rawSeqStore,
                                 U32 const remaining, U32 const minMatch)
{
    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
    assert(sequence.offset > 0);
    /* Likely: No partial sequence */
    if (remaining >= sequence.litLength + sequence.matchLength) {
        rawSeqStore->pos++;
        return sequence;
    }
    /* Cut the sequence short (offset == 0 ==> rest is literals). */
    if (remaining <= sequence.litLength) {
        sequence.offset = 0;
    } else if (remaining < sequence.litLength + sequence.matchLength) {
        sequence.matchLength = remaining - sequence.litLength;
        if (sequence.matchLength < minMatch) {
            sequence.offset = 0;
        }
    }
    /* Skip past `remaining` bytes for the future sequences. */
    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
    return sequence;
}
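
/* Worked example (illustrative values): with remaining = 100 and the next
 * stored sequence {litLength = 30, matchLength = 90}, the match is cut to
 * 100 - 30 = 70 bytes for this block (kept, since 70 >= a small minMatch
 * such as 4), and ZSTD_ldm_skipSequences() leaves {litLength = 0,
 * matchLength = 20} in the store for the next block. */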
DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize); /* If using opt parser, use LDMs only as candidates rather than always accepting them */ if (cParams->strategy >= ZSTD_btopt) {
size_t lastLLSize;
ms->ldmSeqStore = rawSeqStore;
lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize); return lastLLSize;
}
assert(rawSeqStore->pos <= rawSeqStore->size);
assert(rawSeqStore->size <= rawSeqStore->capacity); /* Loop through each sequence and apply the block compressor to the literals */ while (rawSeqStore->pos < rawSeqStore->size && ip < iend) { /* maybeSplitSequence updates rawSeqStore->pos */
rawSeq const sequence = maybeSplitSequence(rawSeqStore,
(U32)(iend - ip), minMatch); /* End signal */ if (sequence.offset == 0) break;