Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*-************************************
*  Tuning parameters
**************************************/

/*
 * LZ4_HEAPMODE :
 * Select how stateless compression functions like `LZ4_compress_default()`
 * allocate memory for their hash table :
 * on the stack (0 : default, fastest), or on the heap (1 : requires malloc()).
 */
#ifndef LZ4_HEAPMODE
#  define LZ4_HEAPMODE 0
#endif
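/* For example (illustrative only, not part of the library) : building with
 * `cc -c -DLZ4_HEAPMODE=1 lz4.c` makes stateless functions such as
 * LZ4_compress_default() place their LZ4_stream_t state on the heap
 * (malloc/free) instead of on the stack. */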
/*
 * LZ4_ACCELERATION_DEFAULT :
 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
 */
#define LZ4_ACCELERATION_DEFAULT 1

/*
 * LZ4_ACCELERATION_MAX :
 * Any "acceleration" value higher than this threshold
 * gets treated as LZ4_ACCELERATION_MAX instead (fix #876)
 */
#define LZ4_ACCELERATION_MAX 65537
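/* Illustration (sketch of the clamping described above, mirroring the checks
 * performed later in this file) : any acceleration argument is normalized as
 *
 *     if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
 *     if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
 *
 * so LZ4_compress_fast(src, dst, srcSize, dstCapacity, 0) behaves like
 * acceleration==1, and a value such as 100000 is treated as 65537. */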
/*-************************************
*  CPU Feature Detection
**************************************/
/* LZ4_FORCE_MEMORY_ACCESS
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets whose assembly generation depends on alignment.
 *            But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
#  if defined(__GNUC__) && \
  ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
  || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define LZ4_FORCE_MEMORY_ACCESS 2
#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)
#    define LZ4_FORCE_MEMORY_ACCESS 1
#  endif
#endif
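/* Example (illustrative only) : since LZ4_FORCE_MEMORY_ACCESS can be defined
 * externally, the selection above can be overridden from the build command,
 * e.g. `cc -c -DLZ4_FORCE_MEMORY_ACCESS=0 lz4.c` to force the portable
 * memcpy()-based method regardless of the detected compiler/target. */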
/*
 * LZ4_FORCE_SW_BITCOUNT
 * Define this parameter if your target system or compiler does not support hardware bit count
 */
#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for WinCE doesn't support Hardware bit count */
#  undef LZ4_FORCE_SW_BITCOUNT   /* avoid double def */
#  define LZ4_FORCE_SW_BITCOUNT
#endif
/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
 * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
 * together with a simple 8-byte copy loop as a fall-back path.
 * However, this optimization hurts the decompression speed by >30%,
 * because the execution does not go to the optimized loop
 * for typical compressible data, and all of the preamble checks
 * before going to the fall-back path become useless overhead.
 * This optimization happens only with the -O3 flag, and -O2 generates
 * a simple 8-byte copy loop.
 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
 * functions are annotated with __attribute__((optimize("O2"))),
 * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
 * of LZ4_wildCopy8 does not affect the compression speed.
 */
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
#  define LZ4_FORCE_O2 __attribute__((optimize("O2")))
#  undef LZ4_FORCE_INLINE
#  define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline))
#else
#  define LZ4_FORCE_O2
#endif
/* Should the alignment test prove unreliable, for some reason,
 * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
#ifndef LZ4_ALIGN_TEST   /* can be externally provided */
#  define LZ4_ALIGN_TEST 1
#endif
/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :
 *  Disable relatively high-level LZ4/HC functions that use dynamic memory
 *  allocation functions (malloc(), calloc(), free()).
 *
 *  Note that this is a compile-time switch. And since it disables
 *  public/stable LZ4 v1 API functions, we don't recommend using this
 *  symbol to generate a library for distribution.
 *
 *  The following public functions are removed when this symbol is defined.
 *  - lz4   : LZ4_createStream, LZ4_freeStream,
 *            LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)
 *  - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,
 *            LZ4_createHC (deprecated), LZ4_freeHC (deprecated)
 *  - lz4frame, lz4file : All LZ4F_* functions
 */
#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
#  define ALLOC(s)          lz4_error_memory_allocation_is_disabled
#  define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled
#  define FREEMEM(p)        lz4_error_memory_allocation_is_disabled
#elif defined(LZ4_USER_MEMORY_FUNCTIONS)
/* memory management functions can be customized by user project.
 * Below functions must exist somewhere in the Project
 * and be available at link time */
void* LZ4_malloc(size_t s);
void* LZ4_calloc(size_t n, size_t s);
void  LZ4_free(void* p);
#  define ALLOC(s)          LZ4_malloc(s)
#  define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
#  define FREEMEM(p)        LZ4_free(p)
#else
#  include <stdlib.h>   /* malloc, calloc, free */
#  define ALLOC(s)          malloc(s)
#  define ALLOC_AND_ZERO(s) calloc(1,s)
#  define FREEMEM(p)        free(p)
#endif
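/* Example of the LZ4_USER_MEMORY_FUNCTIONS hook (illustrative sketch only; it
 * belongs in the user's project, not in this file, and `my_arena_*` is a
 * hypothetical custom allocator) :
 *
 *     void* LZ4_malloc(size_t s)           { return my_arena_alloc(s);     }
 *     void* LZ4_calloc(size_t n, size_t s) { return my_arena_calloc(n, s); }
 *     void  LZ4_free(void* p)              { my_arena_free(p);             }
 *
 * The project is then compiled with -DLZ4_USER_MEMORY_FUNCTIONS so that
 * ALLOC() / ALLOC_AND_ZERO() / FREEMEM() above resolve to these definitions. */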
#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX)   /* max supported by LZ4 format */
#  error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
#endif
/*-************************************
*  Reading and writing into memory
**************************************/
/**
 * LZ4 relies on memcpy with a constant size being inlined. In freestanding
 * environments, the compiler can't assume the implementation of memcpy() is
 * standard compliant, so it can't apply its specialized memcpy() inlining
 * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
 * memcpy() as if it were standard compliant, so it can inline it in freestanding
 * environments. This is needed when decompressing the Linux Kernel, for example.
 */
#if !defined(LZ4_memcpy)
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
#  else
#    define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
#  endif
#endif
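/* Example (illustrative only) : since the block above is guarded by
 * !defined(LZ4_memcpy), a freestanding build may pre-define the macro
 * externally, e.g. `-DLZ4_memcpy(d,s,n)=my_memcpy(d,s,n)` where my_memcpy is a
 * hypothetical project-supplied routine; the fallback definitions are then skipped. */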
/* __pack instructions are safer, but compiler specific,
 * hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;
LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;
LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;
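/* For reference, a "method 1" read helper built on these packed types would look
 * like the sketch below (assumption : LZ4_PACK expands to the compiler's
 * packed-struct extension, making the dereference a legal unaligned access) :
 *
 *     static U16 LZ4_read16_unalign(const void* ptr)
 *     {
 *         return ((const LZ4_unalign16*)ptr)->u16;
 *     }
 */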
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
LZ4_FORCE_INLINE void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
{
BYTE* d = (BYTE*)dstPtr; const BYTE* s = (const BYTE*)srcPtr;
BYTE* const e = (BYTE*)dstEnd;
do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
}
#ifndef LZ4_FAST_DEC_LOOP
#  if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
#    define LZ4_FAST_DEC_LOOP 1
#  elif defined(__aarch64__) && defined(__APPLE__)
#    define LZ4_FAST_DEC_LOOP 1
#  elif defined(__aarch64__) && !defined(__clang__)
     /* On non-Apple aarch64, we disable this optimization for clang because
      * on certain mobile chipsets, performance is reduced with clang. For
      * more information refer to https://github.com/lz4/lz4/pull/707 */
#    define LZ4_FAST_DEC_LOOP 1
#  else
#    define LZ4_FAST_DEC_LOOP 0
#  endif
#endif
/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
 * this version copies two times 16 bytes (instead of one time 32 bytes)
* because it must be compatible with offsets >= 16. */
LZ4_FORCE_INLINE void
LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
{
BYTE* d = (BYTE*)dstPtr; const BYTE* s = (const BYTE*)srcPtr;
BYTE* const e = (BYTE*)dstEnd;
do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
}
/* LZ4_memcpy_using_offset() presumes :
 * - dstEnd >= dstPtr + MINMATCH
 * - there are at least 12 bytes available to write after dstEnd */
LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
BYTE v[8];
assert(dstEnd >= dstPtr + MINMATCH);
switch(offset) {
case 1:
    MEM_INIT(v, *srcPtr, 8);
    break;
case 2:
    LZ4_memcpy(v, srcPtr, 2);
    LZ4_memcpy(&v[2], srcPtr, 2);
#if defined(_MSC_VER) && (_MSC_VER <= 1937)   /* MSVC 2022 ver 17.7 or earlier */
#  pragma warning(push)
#  pragma warning(disable : 6385)   /* warning C6385: Reading invalid data from 'v'. */
#endif
    LZ4_memcpy(&v[4], v, 4);
#if defined(_MSC_VER) && (_MSC_VER <= 1937)   /* MSVC 2022 ver 17.7 or earlier */
#  pragma warning(pop)
#endif
    break;
case 4:
    LZ4_memcpy(v, srcPtr, 4);
    LZ4_memcpy(&v[4], srcPtr, 4);
    break;
default:
    LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
    return;
}

/* replicate the selected 8-byte pattern v[] towards dstEnd, 8 bytes at a time */
LZ4_memcpy(dstPtr, v, 8);
dstPtr += 8;
while (dstPtr < dstEnd) {
    LZ4_memcpy(dstPtr, v, 8);
    dstPtr += 8;
}
}
#ifndef LZ4_COMMONDEFS_ONLY
/*-************************************
*  Local Constants
**************************************/
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
static const U32 LZ4_skipTrigger = 6;   /* Increase this value ==> compression runs slower on incompressible data */
/*-************************************
*  Local Structures and types
**************************************/
typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
/**
 * This enum distinguishes several different modes of accessing previous
 * content in the stream.
 *
 * - noDict        : There is no preceding content.
 * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
 *                   being compressed are valid and refer to the preceding
 *                   content (of length ctx->dictSize), which is available
 *                   contiguously preceding in memory the content currently
 *                   being compressed.
 * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
 *                   else in memory, starting at ctx->dictionary with length
 *                   ctx->dictSize.
 * - usingDictCtx  : Everything concerning the preceding content is
 *                   in a separate context, pointed to by ctx->dictCtx.
 *                   ctx->dictionary, ctx->dictSize, and table entries
 *                   in the current context that refer to positions
 *                   preceding the beginning of the current compression are
 *                   ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
 *                   ->dictSize describe the location and size of the preceding
 *                   content, and matches are found by looking in the ctx
 *                   ->dictCtx->hashTable.
 */
typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
/*-************************************
*  Local Utils
**************************************/
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }
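/* Worked example : LZ4_COMPRESSBOUND(isize), defined in lz4.h as
 * isize + isize/255 + 16 (for inputs within LZ4_MAX_INPUT_SIZE),
 * gives the worst-case compressed size.
 * For a 64 KB input : 65536 + 65536/255 + 16 = 65536 + 257 + 16 = 65809 bytes,
 * so a destination buffer of LZ4_compressBound(65536) bytes can never overflow. */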
/*-****************************************
*  Internal Definitions, used only in Tests
*******************************************/
#if defined (__cplusplus)
extern "C" {
#endif

int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);

int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
                                     int compressedSize, int maxOutputSize,
                                     const void* dictStart, size_t dictSize);
int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,
                                     int compressedSize, int targetOutputSize, int dstCapacity,
                                     const void* dictStart, size_t dictSize);
#if defined (__cplusplus)
}
#endif
LZ4_FORCE_INLINE void
LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
                 const int inputSize,
                 const tableType_t tableType)
{
    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
     * therefore safe to use no matter what mode we're in. Otherwise, we figure
     * out if it's safe to leave as is or whether it needs to be reset.
     */
    if ((tableType_t)cctx->tableType != clearedTable) {
        assert(inputSize >= 0);
        if ((tableType_t)cctx->tableType != tableType
|| ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
|| ((tableType == byU32) && cctx->currentOffset > 1 GB)
|| tableType == byPtr
|| inputSize >= 4 KB)
{
DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
cctx->currentOffset = 0;
cctx->tableType = (U32)clearedTable;
} else {
DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
}
}
/* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
 * is faster than compressing without a gap.
 * However, compressing with currentOffset == 0 is faster still,
 * so we preserve that case.
 */
if (cctx->currentOffset != 0 && tableType == byU32) {
DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
cctx->currentOffset += 64 KB;
}
/* the dictCtx currentOffset is indexed on the start of the dictionary,
 * while a dictionary in the current context precedes the currentOffset */
const BYTE* dictBase = (dictionary == NULL) ? NULL :
(dictDirective == usingDictCtx) ?
dictionary + dictSize - dictCtx->currentOffset :
dictionary + dictSize - startIndex;
BYTE* op = (BYTE*) dest;
BYTE* const olimit = op + maxOutputSize;
U32 offset = 0;
U32 forwardH;
DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
assert(ip != NULL);
if (tableType == byU16) assert(inputSize<LZ4_64Klimit);   /* Size too large (not within 64K limit) */
if (tableType == byPtr) assert(dictDirective==noDict);    /* only supported use case with byPtr */
/* If init conditions are not met, we don't have to mark stream
 * as having dirty context, since no action was taken yet */
if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; }   /* Impossible to store anything */
assert(acceleration >= 1);
const BYTE* forwardIp = ip;
int step = 1;
int searchMatchNb = acceleration << LZ4_skipTrigger;
do {
U32 const h = forwardH;
U32 const current = (U32)(forwardIp - base);
U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
assert(matchIndex <= current);
assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
ip = forwardIp;
forwardIp += step;
step = (searchMatchNb++ >> LZ4_skipTrigger);
if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
assert(ip < mflimitPlusOne);
if (dictDirective == usingDictCtx) {
    if (matchIndex < startIndex) {
        /* there was no match, try the dictionary */
assert(tableType == byU32);
matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
match = dictBase + matchIndex;
matchIndex += dictDelta; /* make dictCtx index comparable with current context */
lowLimit = dictionary;
} else {
match = base + matchIndex;
lowLimit = (const BYTE*)source;
}
} else if (dictDirective == usingExtDict) {
    if (matchIndex < startIndex) {
DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
assert(startIndex - matchIndex >= MINMATCH);
assert(dictBase);
match = dictBase + matchIndex;
lowLimit = dictionary;
} else {
match = base + matchIndex;
lowLimit = (const BYTE*)source;
}
} else { /* single continuous memory segment */
match = base + matchIndex;
}
forwardH = LZ4_hashPosition(forwardIp, tableType);
LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex); if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */
assert(matchIndex < current); if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
&& (matchIndex+LZ4_DISTANCE_MAX < current)) { continue;
} /* too far */
assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */
if (LZ4_read32(match) == LZ4_read32(ip)) { if (maybe_extMem) offset = current - matchIndex; break; /* match found */
}
} while(1);
}
/* Catch up */
filledIp = ip;
assert(ip > anchor);   /* this is always true as ip has been advanced before entering the main loop */
if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {
    do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));
}
/* Encode Literals */
{   unsigned const litLength = (unsigned)(ip - anchor);
    token = op++;
    if ((outputDirective == limitedOutput) &&   /* Check output buffer overflow */
        (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
        return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
    }
    if ((outputDirective == fillOutput) &&
        (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
        op--;
        goto _last_literals;
    }
    if (litLength >= RUN_MASK) {
        unsigned len = litLength - RUN_MASK;
        *token = (RUN_MASK<<ML_BITS);
        for(; len >= 255 ; len-=255) *op++ = 255;
*op++ = (BYTE)len;
} else *token = (BYTE)(litLength<<ML_BITS);
_next_match:
    /* at this stage, the following variables must be correctly set :
     * - ip : at start of LZ operation
     * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
     * - offset : if maybe_ext_memSegment==1 (constant)
     * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
     * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
*/
if ((outputDirective == fillOutput) &&
    (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
    /* the match was too close to the end, rewind and go to last literals */
    op = token;
    goto _last_literals;
}
if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
  && (lowLimit==dictionary) /* match within extDict */ ) {
    const BYTE* limit = ip + (dictEnd-match);
    assert(dictEnd > match);
    if (limit > matchlimit) limit = matchlimit;
    matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
    ip += (size_t)matchCode + MINMATCH;
    if (ip==limit) {
        unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
matchCode += more;
ip += more;
}
DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
} else {
matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
ip += (size_t)matchCode + MINMATCH;
DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
}
if ((outputDirective) && /* Check output buffer overflow */
    (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
    if (outputDirective == fillOutput) {
        /* Match description too long : reduce it */
U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
ip -= matchCode - newMatchCode;
assert(newMatchCode < matchCode);
matchCode = newMatchCode;
if (unlikely(ip <= filledIp)) {
    /* We have already filled up to filledIp so if ip ends up less than filledIp
     * we have positions in the hash table beyond the current position. This is
     * a problem if we reuse the hash table. So we have to remove these positions
     * from the hash table.
     */
    const BYTE* ptr;
    DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
    for (ptr = ip; ptr <= filledIp; ++ptr) {
U32 const h = LZ4_hashPosition(ptr, tableType);
LZ4_clearHash(h, cctx->hashTable, tableType);
}
}
} else {
assert(outputDirective == limitedOutput); return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
}
}
if (matchCode >= ML_MASK) {
*token += ML_MASK;
matchCode -= ML_MASK;
LZ4_write32(op, 0xFFFFFFFF);
while (matchCode >= 4*255) {
op+=4;
LZ4_write32(op, 0xFFFFFFFF);
matchCode -= 4*255;
}
op += matchCode / 255;
*op++ = (BYTE)(matchCode % 255);
} else
*token += (BYTE)(matchCode);
}

/* Ensure we have enough space for the last literals. */
assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
anchor = ip;
/* Test end of chunk */
if (ip >= mflimitPlusOne) break;
/**
 * LZ4_compress_fast_extState_fastReset() :
 * A variant of LZ4_compress_fast_extState().
 *
 * Using this variant avoids an expensive initialization step. It is only safe
 * to call if the state buffer is known to be correctly initialized already
 * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
 * "correctly initialized").
 */
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse;
if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
assert(ctx != NULL);
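/* Typical usage sketch (illustrative only; variable names are placeholders) :
 * initialize a caller-owned state once, then reuse it for subsequent
 * independent blocks without paying the full initialization cost again :
 *
 *     LZ4_stream_t stream;
 *     LZ4_initStream(&stream, sizeof(stream));          <-- full init, once
 *     ...
 *     LZ4_compress_fast_extState_fastReset(&stream, src, dst, srcSize, dstCapacity, 1);
 */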
int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity)
{
    return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);
}
/* Note!: This function leaves the stream in an unclean/broken state!
 * It is not safe to subsequently use the same state with a _fastReset() or
 * _continue() call without resetting it. */
static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
{
    void* const s = LZ4_initStream(state, sizeof (*state));
assert(s != NULL); (void)s;
/* resetStream is now deprecated,
 * prefer initStream() which is more general */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
}
DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
/* It's necessary to reset the context,
 * and not just continue it with prepareTable()
 * to avoid any risk of generating overflowing matchIndex
* when compressing using this dictionary */
LZ4_resetStream(LZ4_dict);
/* We always increment the offset by 64 KB, since, if the dict is longer,
 * we truncate it to the last 64k, and if it's shorter, we still want to
 * advance by a whole window length so we can provide the guarantee that
 * there are only valid offsets in the window, which allows an optimization
 * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
 * dict isn't a full 64k. */