/*
 * LZ4 auto-framing library
 * Copyright (C) 2011-2016, Yann Collet.
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - LZ4 homepage : http://www.lz4.org
 * - LZ4 source repository : https://github.com/lz4/lz4
 */
/* LZ4F is a stand-alone API to create LZ4-compressed Frames
 * in full conformance with specification v1.6.1 .
 * This library relies upon memory management capabilities (malloc, free)
 * provided either by <stdlib.h>,
 * or redirected towards another library of user's choice
 * (see Memory Routines below).
 */
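/* Minimal single-shot usage sketch (illustrative only; the wrapper name
 * compress_to_frame and its parameters are placeholders, error handling is
 * reduced to single checks, and the destination buffer is obtained with a
 * plain malloc) :
 *
 *     #include "lz4frame.h"
 *     #include <stdlib.h>
 *
 *     int compress_to_frame(const void* src, size_t srcSize,
 *                           void** framePtr, size_t* frameSizePtr)
 *     {
 *         size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);
 *         void* const dst = malloc(bound);
 *         size_t cSize;
 *         if (dst == NULL) return 1;
 *         cSize = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
 *         if (LZ4F_isError(cSize)) { free(dst); return 1; }
 *         *framePtr = dst; *frameSizePtr = cSize;
 *         return 0;
 *     }
 */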
/*-************************************
*  Compiler Options
**************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
#endif
/*-************************************
*  Tuning parameters
**************************************/
/*
 * LZ4F_HEAPMODE :
 * Control how LZ4F_compressFrame allocates the Compression State,
 * either on stack (0:default, fastest), or in memory heap (1:requires malloc()).
 */
#ifndef LZ4F_HEAPMODE
#  define LZ4F_HEAPMODE 0
#endif
/*-************************************
*  Library declarations
**************************************/
#define LZ4F_STATIC_LINKING_ONLY
#include "lz4frame.h"
#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"
#define LZ4_HC_STATIC_LINKING_ONLY
#include "lz4hc.h"
#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h"
/*-************************************
*  Memory routines
**************************************/
/*
 * User may redirect invocations of
 * malloc(), calloc() and free()
 * towards another library or solution of their choice
 * by modifying below section.
 */

#include <string.h>   /* memset, memcpy, memmove */
#ifndef LZ4_SRC_INCLUDED  /* avoid redefinition when sources are coalesced */
#  define MEM_INIT(p,v,s)   memset((p),(v),(s))
#endif

#ifndef LZ4_SRC_INCLUDED  /* avoid redefinition when sources are coalesced */
#  include <stdlib.h>   /* malloc, calloc, free */
#  define ALLOC(s)          malloc(s)
#  define ALLOC_AND_ZERO(s) calloc(1,(s))
#  define FREEMEM(p)        free(p)
#endif
static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem)
{
    /* custom calloc defined : use it */
    if (cmem.customCalloc != NULL) {
        return cmem.customCalloc(cmem.opaqueState, s);
    }
    /* nothing defined : use default <stdlib.h>'s calloc() */
    if (cmem.customAlloc == NULL) {
        return ALLOC_AND_ZERO(s);
    }
    /* only custom alloc defined : use it, and combine it with memset() */
    {   void* const p = cmem.customAlloc(cmem.opaqueState, s);
        if (p != NULL) MEM_INIT(p, 0, s);
        return p;
    }
}
static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
{
    /* custom malloc defined : use it */
    if (cmem.customAlloc != NULL) {
        return cmem.customAlloc(cmem.opaqueState, s);
    }
    /* nothing defined : use default <stdlib.h>'s malloc() */
    return ALLOC(s);
}
static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
{
    if (p == NULL) return;
    if (cmem.customFree != NULL) {
        /* custom allocation defined : use it */
        cmem.customFree(cmem.opaqueState, p);
        return;
    }
    /* nothing defined : use default <stdlib.h>'s free() */
    FREEMEM(p);
}
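/* Custom allocator sketch (illustrative). The member names customAlloc,
 * customCalloc, customFree and opaqueState are the ones used by the wrappers
 * above; my_alloc / my_free stand for any user-provided routines, here simply
 * forwarding to the standard allocator.
 *
 *     static void* my_alloc(void* state, size_t size) { (void)state; return malloc(size); }
 *     static void  my_free (void* state, void* ptr)   { (void)state; free(ptr); }
 *
 *     LZ4F_CustomMem cmem;
 *     memset(&cmem, 0, sizeof(cmem));
 *     cmem.customAlloc = my_alloc;   // customCalloc left NULL => alloc + memset fallback
 *     cmem.customFree  = my_free;
 *     cmem.opaqueState = NULL;
 *     {   LZ4F_cctx* const cctx =
 *             LZ4F_createCompressionContext_advanced(cmem, LZ4F_VERSION);
 *         // ... use cctx, then LZ4F_freeCompressionContext(cctx);
 *     }
 */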
/*-************************************
*  Debug
**************************************/
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
#  include <assert.h>
#else
#  ifndef assert
#    define assert(condition) ((void)0)
#  endif
#endif

#define LZ4F_STATIC_ASSERT(c)    { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
#  include <stdio.h>
static int g_debuglog_enable = 1;
#  define DEBUGLOG(l, ...) {                                  \
        if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {          \
            fprintf(stderr, __FILE__ " (%i): ", __LINE__ );   \
            fprintf(stderr, __VA_ARGS__);                     \
            fprintf(stderr, " \n");                           \
    }   }
#else
#  define DEBUGLOG(l, ...)  {}    /* disabled */
#endif
/*-************************************
*  Basic Types
**************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
  typedef  uint8_t  BYTE;
  typedef uint16_t  U16;
  typedef uint32_t  U32;
  typedef  int32_t  S32;
  typedef uint64_t  U64;
#else
  typedef unsigned char       BYTE;
  typedef unsigned short      U16;
  typedef unsigned int        U32;
  typedef   signed int        S32;
  typedef unsigned long long  U64;
#endif
/* unoptimized version; solves endianness & alignment issues */
static U32 LZ4F_readLE32 (const void* src)
{
    const BYTE* const srcPtr = (const BYTE*)src;
    U32 value32 = srcPtr[0];
    value32 |= ((U32)srcPtr[1])<< 8;
    value32 |= ((U32)srcPtr[2])<<16;
    value32 |= ((U32)srcPtr[3])<<24;
    return value32;
}

static void LZ4F_writeLE32 (void* dst, U32 value32)
{
    BYTE* const dstPtr = (BYTE*)dst;
    dstPtr[0] = (BYTE)value32;
    dstPtr[1] = (BYTE)(value32 >> 8);
    dstPtr[2] = (BYTE)(value32 >> 16);
    dstPtr[3] = (BYTE)(value32 >> 24);
}
static U64 LZ4F_readLE64 (const void* src)
{
    const BYTE* const srcPtr = (const BYTE*)src;
    U64 value64 = srcPtr[0];
    value64 |= ((U64)srcPtr[1]<<8);
    value64 |= ((U64)srcPtr[2]<<16);
    value64 |= ((U64)srcPtr[3]<<24);
    value64 |= ((U64)srcPtr[4]<<32);
    value64 |= ((U64)srcPtr[5]<<40);
    value64 |= ((U64)srcPtr[6]<<48);
    value64 |= ((U64)srcPtr[7]<<56);
    return value64;
}

static void LZ4F_writeLE64 (void* dst, U64 value64)
{
    BYTE* const dstPtr = (BYTE*)dst;
    dstPtr[0] = (BYTE)value64;
    dstPtr[1] = (BYTE)(value64 >> 8);
    dstPtr[2] = (BYTE)(value64 >> 16);
    dstPtr[3] = (BYTE)(value64 >> 24);
    dstPtr[4] = (BYTE)(value64 >> 32);
    dstPtr[5] = (BYTE)(value64 >> 40);
    dstPtr[6] = (BYTE)(value64 >> 48);
    dstPtr[7] = (BYTE)(value64 >> 56);
}
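/* These byte-by-byte accessors keep the frame format independent of the
 * host's byte order and alignment rules. A quick round-trip sanity check
 * (illustrative sketch; `tmp` is a local scratch buffer) :
 *
 *     BYTE tmp[8];
 *     LZ4F_writeLE32(tmp, 0x12345678U);
 *     assert(LZ4F_readLE32(tmp) == 0x12345678U);
 *     assert(tmp[0] == 0x78);    // least significant byte is stored first
 *     LZ4F_writeLE64(tmp, (U64)1 << 40);
 *     assert(LZ4F_readLE64(tmp) == ((U64)1 << 40));
 */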
/*-************************************
*  Constants
**************************************/
#ifndef LZ4_SRC_INCLUDED   /* avoid double definition */
#  define KB *(1<<10)
#  define MB *(1<<20)
#  define GB *(1<<30)
#endif

#define _1BIT  0x01
#define _2BITS 0x03
#define _3BITS 0x07
#define _4BITS 0x0F

#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB

static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN;   /*  7 */
static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX;   /* 19 */
static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE;    /* block header : size, and compress flag */
static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE;  /* block footer : checksum (optional) */
/*-************************************
*  Structures and local types
**************************************/

typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED } LZ4F_BlockCompressMode_e;
typedef enum { ctxNone, ctxFast, ctxHC } LZ4F_CtxType_e;

typedef struct LZ4F_cctx_s
{
    LZ4F_CustomMem cmem;
    LZ4F_preferences_t prefs;
    U32    version;
    U32    cStage;       /* 0 : compression uninitialized ; 1 : initialized, can compress */
    const LZ4F_CDict* cdict;
    size_t maxBlockSize;
    size_t maxBufferSize;
    BYTE*  tmpBuff;      /* internal buffer, for streaming */
    BYTE*  tmpIn;        /* starting position of data to compress within internal buffer (>= tmpBuff) */
    size_t tmpInSize;    /* amount of data to compress after tmpIn */
    U64    totalInSize;
    XXH32_state_t xxh;
    void*  lz4CtxPtr;
    U16    lz4CtxAlloc;  /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
    U16    lz4CtxType;   /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
    LZ4F_BlockCompressMode_e blockCompressMode;
} LZ4F_cctx_t;
/*-************************************
*  Error management
**************************************/
#define LZ4F_GENERATE_STRING(STRING) #STRING,
static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) };


unsigned LZ4F_isError(LZ4F_errorCode_t code)
{
    return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode));
}

const char* LZ4F_getErrorName(LZ4F_errorCode_t code)
{
    static const char* codeError = "Unspecified error code";
    if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)];
    return codeError;
}

LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
{
    if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError;
    return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
}

static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code)
{
    /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
    LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
    return (LZ4F_errorCode_t)-(ptrdiff_t)code;
}

#define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e)

#define RETURN_ERROR_IF(c,e) do {        \
        if (c) {                         \
            DEBUGLOG(3, "Error: " #c);   \
            RETURN_ERROR(e);             \
        }                                \
    } while (0)

#define FORWARD_IF_ERROR(r) do { if (LZ4F_isError(r)) return (r); } while (0)
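/* Typical error-checking pattern for the public API (illustrative; assumes
 * <stdio.h> for the error message). Every size_t-returning LZ4F_* function
 * encodes failures as values testable with LZ4F_isError(), and
 * LZ4F_getErrorName() turns them into readable strings.
 *
 *     size_t const r = LZ4F_compressFrame(dst, dstCapacity, src, srcSize, NULL);
 *     if (LZ4F_isError(r)) {
 *         fprintf(stderr, "compression failed: %s\n", LZ4F_getErrorName(r));
 *         return -1;
 *     }
 */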
unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }

int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; }

size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID)
{
    static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };

    if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
    if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB)
        RETURN_ERROR(maxBlockSize_invalid);
    {   int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB;
        return blockSizes[blockSizeIdx];
    }
}
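/* The four valid block size IDs map onto fixed block sizes (illustrative
 * check; the LZ4F_max* enum names are the public block size identifiers
 * declared in lz4frame.h) :
 *
 *     assert(LZ4F_getBlockSize(LZ4F_max64KB)  ==  64 KB);
 *     assert(LZ4F_getBlockSize(LZ4F_max256KB) == 256 KB);
 *     assert(LZ4F_getBlockSize(LZ4F_max1MB)   ==   1 MB);
 *     assert(LZ4F_getBlockSize(LZ4F_max4MB)   ==   4 MB);
 *     assert(LZ4F_getBlockSize(0) == LZ4F_getBlockSize(LZ4F_BLOCKSIZEID_DEFAULT));
 */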
/*-************************************
*  Private functions
**************************************/
#define MIN(a,b)   ( (a) < (b) ? (a) : (b) )

static BYTE LZ4F_headerChecksum (const void* header, size_t length)
{
    U32 const xxh = XXH32(header, length, 0);
    return (BYTE)(xxh >> 8);
}
/*-************************************
*  Simple-pass compression functions
**************************************/
static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID,
                                           const size_t srcSize)
{
    LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB;
    size_t maxBlockSize = 64 KB;
    while (requestedBSID > proposedBSID) {
        if (srcSize <= maxBlockSize)
            return proposedBSID;
        proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1);
        maxBlockSize <<= 2;
    }
    return requestedBSID;
}
/*! LZ4F_compressBound_internal() :
 *  Provides dstCapacity given a srcSize to guarantee operation success in worst case situations.
 *  prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario.
 * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers.
 *  When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
 */
static size_t LZ4F_compressBound_internal(size_t srcSize,
                                    const LZ4F_preferences_t* preferencesPtr,
                                          size_t alreadyBuffered)
{
    LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
    prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled;   /* worst case */
    prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled;       /* worst case */
    {   const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
        U32 const flush = prefsPtr->autoFlush | (srcSize==0);
        LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
        size_t const blockSize = LZ4F_getBlockSize(blockID);
        size_t const maxBuffered = blockSize - 1;
        size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
        size_t const maxSrcSize = srcSize + bufferedSize;
        unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
        size_t const partialBlockSize = maxSrcSize & (blockSize-1);
        size_t const lastBlockSize = flush ? partialBlockSize : 0;
        unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);

        size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag;
        size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize);

        return ((BHSize + blockCRCSize) * nbBlocks) +
               (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;
    }
}
size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
{
    LZ4F_preferences_t prefs;
    size_t const headerSize = maxFHSize;      /* max header size, including optional fields */

    if (preferencesPtr!=NULL) prefs = *preferencesPtr;
    else MEM_INIT(&prefs, 0, sizeof(prefs));
    prefs.autoFlush = 1;

    return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);
}
/*! LZ4F_compressFrame_usingCDict() :
 *  Compress srcBuffer using a dictionary, in a single step.
 *  cdict can be NULL, in which case, no dictionary is used.
 *  dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
 *  The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
 *  however, it's the only way to provide a dictID, so it's not recommended.
 * @return : number of bytes written into dstBuffer,
 *           or an error code if it fails (can be tested using LZ4F_isError())
 */
size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
                                     void* dstBuffer, size_t dstCapacity,
                               const void* srcBuffer, size_t srcSize,
                               const LZ4F_CDict* cdict,
                               const LZ4F_preferences_t* preferencesPtr)
{
    LZ4F_preferences_t prefs;
    LZ4F_compressOptions_t options;
    BYTE* const dstStart = (BYTE*) dstBuffer;
    BYTE* dstPtr = dstStart;
    BYTE* const dstEnd = dstStart + dstCapacity;

    DEBUGLOG(4, "LZ4F_compressFrame_usingCDict (srcSize=%u)", (unsigned)srcSize);
    if (preferencesPtr!=NULL)
        prefs = *preferencesPtr;
    else
        MEM_INIT(&prefs, 0, sizeof(prefs));
    if (prefs.frameInfo.contentSize != 0)
        prefs.frameInfo.contentSize = (U64)srcSize;   /* auto-correct content size if selected (!=0) */

    prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
    prefs.autoFlush = 1;
    if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
        prefs.frameInfo.blockMode = LZ4F_blockIndependent;   /* only one block => no need for inter-block link */

    MEM_INIT(&options, 0, sizeof(options));
    options.stableSrc = 1;

    RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall);

    {   size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs);  /* write header */
        FORWARD_IF_ERROR(headerSize);
        dstPtr += headerSize;   /* header size */ }

    assert(dstEnd >= dstPtr);
    {   size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options);
        FORWARD_IF_ERROR(cSize);
        dstPtr += cSize; }

    assert(dstEnd >= dstPtr);
    {   size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options);   /* flush last block, and generate suffix */
        FORWARD_IF_ERROR(tailSize);
        dstPtr += tailSize; }

    assert(dstEnd >= dstStart);
    return (size_t)(dstPtr - dstStart);
}
/*! LZ4F_compressFrame() :
 *  Compress an entire srcBuffer into a valid LZ4 frame, in a single step.
 *  dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
 *  The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
 * @return : number of bytes written into dstBuffer,
 *           or an error code if it fails (can be tested using LZ4F_isError())
 */
size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
                    const void* srcBuffer, size_t srcSize,
                    const LZ4F_preferences_t* preferencesPtr)
{
    size_t result;
#if (LZ4F_HEAPMODE)
    LZ4F_cctx_t* cctxPtr;
    result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION);
    FORWARD_IF_ERROR(result);
#else
    LZ4F_cctx_t cctx;
    LZ4_stream_t lz4ctx;
    LZ4F_cctx_t* const cctxPtr = &cctx;

    MEM_INIT(&cctx, 0, sizeof(cctx));
    cctx.version = LZ4F_VERSION;
    cctx.maxBufferSize = 5 MB;   /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */
    if ( preferencesPtr == NULL
      || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) {
        LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
        cctxPtr->lz4CtxPtr = &lz4ctx;
        cctxPtr->lz4CtxAlloc = 1;
        cctxPtr->lz4CtxType = ctxFast;
    }
#endif
    DEBUGLOG(4, "LZ4F_compressFrame");

    result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity,
                                           srcBuffer, srcSize,
                                           NULL, preferencesPtr);

#if (LZ4F_HEAPMODE)
    LZ4F_freeCompressionContext(cctxPtr);
#else
    if ( preferencesPtr != NULL
      && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) {
        LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
    }
#endif
    return result;
}
/*-***************************************************
*   Dictionary compression
*****************************************************/

struct LZ4F_CDict_s {
    LZ4F_CustomMem cmem;
    void* dictContent;
    LZ4_stream_t* fastCtx;
    LZ4_streamHC_t* HCCtx;
};  /* typedef'd to LZ4F_CDict within lz4frame_static.h */

LZ4F_CDict*
LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize)
{
    const char* dictStart = (const char*)dictBuffer;
    LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem);
    DEBUGLOG(4, "LZ4F_createCDict_advanced");
    if (!cdict) return NULL;
    cdict->cmem = cmem;
    if (dictSize > 64 KB) {
        dictStart += dictSize - 64 KB;
        dictSize = 64 KB;
    }
    cdict->dictContent = LZ4F_malloc(dictSize, cmem);
    /* note: using @cmem to allocate => can't use default create */
    cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem);
    cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem);
    if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
        LZ4F_freeCDict(cdict);
        return NULL;
    }
    memcpy(cdict->dictContent, dictStart, dictSize);
    LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
    LZ4_loadDictSlow(cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
    LZ4_initStreamHC(cdict->HCCtx, sizeof(LZ4_streamHC_t));
    /* note: we don't know at this point which compression level is going to be used
     * as a consequence, HCCtx is created for the more common HC mode */
    LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
    LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
    return cdict;
}
/*! LZ4F_createCDict() :
 *  When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
 *  LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
 *  LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
 * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict
 * @return : digested dictionary for compression, or NULL if failed */
LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
{
    DEBUGLOG(4, "LZ4F_createCDict");
    return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize);
}

void LZ4F_freeCDict(LZ4F_CDict* cdict)
{
    if (cdict==NULL) return;  /* support free on NULL */
    LZ4F_free(cdict->dictContent, cdict->cmem);
    LZ4F_free(cdict->fastCtx, cdict->cmem);
    LZ4F_free(cdict->HCCtx, cdict->cmem);
    LZ4F_free(cdict, cdict->cmem);
}
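/* CDict usage sketch (illustrative; dictData/dictSize stand for any
 * application-provided dictionary, dst must hold at least
 * LZ4F_compressFrameBound(srcSize, NULL) bytes, and error handling is reduced
 * to NULL / LZ4F_isError checks) :
 *
 *     LZ4F_CDict* const cdict = LZ4F_createCDict(dictData, dictSize);
 *     LZ4F_cctx* cctx = NULL;
 *     if (cdict && !LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) {
 *         size_t const cSize = LZ4F_compressFrame_usingCDict(cctx,
 *                                     dst, dstCapacity,
 *                                     src, srcSize,
 *                                     cdict, NULL);
 *         // cSize is the frame size, or an error code (test with LZ4F_isError)
 *     }
 *     LZ4F_freeCompressionContext(cctx);
 *     LZ4F_freeCDict(cdict);
 */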
/*-*********************************
*  Advanced compression functions
***********************************/

LZ4F_cctx*
LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
{
    LZ4F_cctx* const cctxPtr =
        (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem);
    if (cctxPtr==NULL) return NULL;

    cctxPtr->cmem = customMem;
    cctxPtr->version = version;
    cctxPtr->cStage = 0;   /* Uninitialized. Next stage : init cctx */
    return cctxPtr;
}
/*! LZ4F_createCompressionContext() :
 *  The first thing to do is to create a compressionContext object, which will be used in all compression operations.
 *  This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure.
 *  The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries.
 *  The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
 *  If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
 *  Object can release its memory using LZ4F_freeCompressionContext();
 */
LZ4F_errorCode_t
LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
{
    assert(LZ4F_compressionContextPtr != NULL);   /* considered a violation of narrow contract */
    /* in case it nonetheless happens in production */
    RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null);

    *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version);
    RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed);
    return LZ4F_OK_NoError;
}

LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
{
    if (cctxPtr != NULL) {  /* support free on NULL */
        LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);  /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
        LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
        LZ4F_free(cctxPtr, cctxPtr->cmem);
    }
    return LZ4F_OK_NoError;
}
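/* Context lifecycle sketch (illustrative) : a single cctx can be reused for
 * many frames; only creation and destruction pay an allocation cost.
 *
 *     LZ4F_cctx* cctx = NULL;
 *     size_t const err = LZ4F_createCompressionContext(&cctx, LZ4F_VERSION);
 *     if (LZ4F_isError(err)) return -1;
 *     // ... compress one or more frames with cctx ...
 *     LZ4F_freeCompressionContext(cctx);   // accepts NULL as well
 */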
/*
 * This function prepares the internal LZ4(HC) stream for a new compression,
 * resetting the context and attaching the dictionary, if there is one.
 *
 * It needs to be called at the beginning of each independent compression
 * stream (i.e., at the beginning of a frame in blockLinked mode, or at the
 * beginning of each block in blockIndependent mode).
 */
static void LZ4F_initStream(void* ctx,
                            const LZ4F_CDict* cdict,
                            int level,
                            LZ4F_blockMode_t blockMode)
{
    if (level < LZ4HC_CLEVEL_MIN) {
        if (cdict || blockMode == LZ4F_blockLinked) {
            /* In these cases, we will call LZ4_compress_fast_continue(),
             * which needs an already reset context. Otherwise, we'll call a
             * one-shot API. The non-continued APIs internally perform their own
             * resets at the beginning of their calls, where they know what
             * tableType they need the context to be in. So in that case this
             * would be misguided / wasted work. */
            LZ4_resetStream_fast((LZ4_stream_t*)ctx);
            if (cdict) LZ4_attach_dictionary((LZ4_stream_t*)ctx, cdict->fastCtx);
        }
        /* In these cases, we'll call a one-shot API.
         * The non-continued APIs internally perform their own resets
         * at the beginning of their calls, where they know
         * which tableType they need the context to be in.
         * Therefore, a reset here would be wasted work. */
    } else {
        LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
        if (cdict) LZ4_attach_HC_dictionary((LZ4_streamHC_t*)ctx, cdict->HCCtx);
    }
}
static int ctxTypeID_to_size(int ctxTypeID) {
    switch(ctxTypeID) {
    case 1:
        return LZ4_sizeofState();
    case 2:
        return LZ4_sizeofStateHC();
    default:
        return 0;
    }
}
/* LZ4F_compressBegin_internal()
 * Note: only accepts @cdict _or_ @dictBuffer as non NULL.
 */
size_t LZ4F_compressBegin_internal(LZ4F_cctx* cctx,
                          void* dstBuffer, size_t dstCapacity,
                          const void* dictBuffer, size_t dictSize,
                          const LZ4F_CDict* cdict,
                          const LZ4F_preferences_t* preferencesPtr)
{
    LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;

    RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall);
    if (preferencesPtr == NULL) preferencesPtr = &prefNull;
    cctx->prefs = *preferencesPtr;

    /* cctx Management */
    {   U16 const ctxTypeID = (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
        int requiredSize = ctxTypeID_to_size(ctxTypeID);
        int allocatedSize = ctxTypeID_to_size(cctx->lz4CtxAlloc);
        if (allocatedSize < requiredSize) {
            /* not enough space allocated */
            LZ4F_free(cctx->lz4CtxPtr, cctx->cmem);
            if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
                /* must take ownership of memory allocation,
                 * in order to respect custom allocator contract */
                cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctx->cmem);
                if (cctx->lz4CtxPtr)
                    LZ4_initStream(cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
            } else {
                cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctx->cmem);
                if (cctx->lz4CtxPtr)
                    LZ4_initStreamHC(cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
            }
            RETURN_ERROR_IF(cctx->lz4CtxPtr == NULL, allocation_failed);
            cctx->lz4CtxAlloc = ctxTypeID;
            cctx->lz4CtxType = ctxTypeID;
        } else if (cctx->lz4CtxType != ctxTypeID) {
            /* otherwise, a sufficient buffer is already allocated,
             * but we need to reset it to the correct context type */
            if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
                LZ4_initStream((LZ4_stream_t*)cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
            } else {
                LZ4_initStreamHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
                LZ4_setCompressionLevel((LZ4_streamHC_t*)cctx->lz4CtxPtr, cctx->prefs.compressionLevel);
            }
            cctx->lz4CtxType = ctxTypeID;
    }   }
    /* Buffer Management */
    if (cctx->prefs.frameInfo.blockSizeID == 0)
        cctx->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
    cctx->maxBlockSize = LZ4F_getBlockSize(cctx->prefs.frameInfo.blockSizeID);

    {   size_t const requiredBuffSize = preferencesPtr->autoFlush ?
                ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) :  /* only needs past data up to window size */
                cctx->maxBlockSize + ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);

        if (cctx->maxBufferSize < requiredBuffSize) {
            cctx->maxBufferSize = 0;
            LZ4F_free(cctx->tmpBuff, cctx->cmem);
            cctx->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctx->cmem);
            RETURN_ERROR_IF(cctx->tmpBuff == NULL, allocation_failed);
            cctx->maxBufferSize = requiredBuffSize;
    }   }
    cctx->tmpIn = cctx->tmpBuff;
    cctx->tmpInSize = 0;
    (void)XXH32_reset(&(cctx->xxh), 0);

    /* context init */
    cctx->cdict = cdict;
    if (cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
        /* frame init only for blockLinked : blockIndependent will be init at each block */
        LZ4F_initStream(cctx->lz4CtxPtr, cdict, cctx->prefs.compressionLevel, LZ4F_blockLinked);
    }
    if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
        LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctx->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
    }
    if (dictBuffer) {
        assert(cdict == NULL);
        RETURN_ERROR_IF(dictSize > INT_MAX, parameter_invalid);
        if (cctx->lz4CtxType == ctxFast) {
            LZ4_loadDict((LZ4_stream_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
        } else {
            assert(cctx->lz4CtxType == ctxHC);
            LZ4_loadDictHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
        }
    }

    /* Stage 2 : Write Frame Header */

    /* Magic Number */
    LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
    dstPtr += 4;
    {   BYTE* const headerStart = dstPtr;

        /* FLG Byte */
        *dstPtr++ = (BYTE)(((1 & _2BITS) << 6)    /* Version('01') */
            + ((cctx->prefs.frameInfo.blockMode & _1BIT ) << 5)
            + ((cctx->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
            + ((unsigned)(cctx->prefs.frameInfo.contentSize > 0) << 3)
            + ((cctx->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
            +  (cctx->prefs.frameInfo.dictID > 0) );
        /* BD Byte */
        *dstPtr++ = (BYTE)((cctx->prefs.frameInfo.blockSizeID & _3BITS) << 4);
        /* Optional Frame content size field */
        if (cctx->prefs.frameInfo.contentSize) {
            LZ4F_writeLE64(dstPtr, cctx->prefs.frameInfo.contentSize);
            dstPtr += 8;
            cctx->totalInSize = 0;
        }
        /* Optional dictionary ID field */
        if (cctx->prefs.frameInfo.dictID) {
            LZ4F_writeLE32(dstPtr, cctx->prefs.frameInfo.dictID);
            dstPtr += 4;
        }
        /* Header CRC Byte */
        *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
        dstPtr++;
    }

    cctx->cStage = 1;   /* header written, now request input data block */
    return (size_t)(dstPtr - dstStart);
}
size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
                          void* dstBuffer, size_t dstCapacity,
                          const LZ4F_preferences_t* preferencesPtr)
{
    return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
                                       NULL, 0,
                                       NULL, preferencesPtr);
}
825 * Hidden implementation,
826 * employed for multi-threaded compression
827 * when frame defines linked blocks */
828 size_t LZ4F_compressBegin_usingDictOnce(LZ4F_cctx
* cctx
,
829 void* dstBuffer
, size_t dstCapacity
,
830 const void* dict
, size_t dictSize
,
831 const LZ4F_preferences_t
* preferencesPtr
)
833 return LZ4F_compressBegin_internal(cctx
, dstBuffer
, dstCapacity
,
835 NULL
, preferencesPtr
);
size_t LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx,
                          void* dstBuffer, size_t dstCapacity,
                          const void* dict, size_t dictSize,
                          const LZ4F_preferences_t* preferencesPtr)
{
    /* note : incorrect implementation :
     * this will only use the dictionary once,
     * instead of once *per* block when a frame defines independent blocks */
    return LZ4F_compressBegin_usingDictOnce(cctx, dstBuffer, dstCapacity,
                                            dict, dictSize,
                                            preferencesPtr);
}
size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
                          void* dstBuffer, size_t dstCapacity,
                          const LZ4F_CDict* cdict,
                          const LZ4F_preferences_t* preferencesPtr)
{
    return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
                                       NULL, 0,
                                       cdict, preferencesPtr);
}
/* LZ4F_compressBound() :
 * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario.
 *  LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario.
 *  This function cannot fail.
 */
size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
{
    if (preferencesPtr && preferencesPtr->autoFlush) {
        return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
    }
    return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
}
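/* Streaming buffer-sizing sketch (illustrative; CHUNK_SIZE is an application
 * choice, not part of this library). When feeding chunks of at most
 * CHUNK_SIZE bytes to LZ4F_compressUpdate(), a destination buffer of
 * LZ4F_compressBound(CHUNK_SIZE, &prefs) bytes is guaranteed to be large
 * enough for any single call, and (since srcSize==0 is covered) for
 * LZ4F_flush() / LZ4F_compressEnd() as well.
 *
 *     size_t const dstCapacity = LZ4F_compressBound(CHUNK_SIZE, &prefs);
 *     void* const dst = malloc(dstCapacity);   // reused across all calls
 */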
typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict);
/*! LZ4F_makeBlock():
 *  compress a single block, add header and optional checksum.
 *  assumption : dst buffer capacity is >= BHSize + srcSize + crcSize
 */
static size_t LZ4F_makeBlock(void* dst,
                       const void* src, size_t srcSize,
                             compressFunc_t compress, void* lz4ctx, int level,
                       const LZ4F_CDict* cdict,
                             LZ4F_blockChecksum_t crcFlag)
{
    BYTE* const cSizePtr = (BYTE*)dst;
    U32 cSize;
    assert(compress != NULL);
    cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
                          (int)(srcSize), (int)(srcSize-1),
                          level, cdict);

    if (cSize == 0 || cSize >= srcSize) {
        cSize = (U32)srcSize;
        LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
        memcpy(cSizePtr+BHSize, src, srcSize);
    } else {
        LZ4F_writeLE32(cSizePtr, cSize);
    }
    if (crcFlag) {
        U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0);  /* checksum of compressed data */
        LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32);
    }
    return BHSize + cSize + ((U32)crcFlag)*BFSize;
}
static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
{
    int const acceleration = (level < 0) ? -level + 1 : 1;
    DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize);
    LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
    if (cdict) {
        return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
    } else {
        return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration);
    }
}
static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
{
    int const acceleration = (level < 0) ? -level + 1 : 1;
    (void)cdict; /* init once at beginning of frame */
    DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize);
    return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
}
static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
{
    LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
    if (cdict) {
        return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
    }
    return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level);
}
static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
{
    (void)level; (void)cdict; /* init once at beginning of frame */
    return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
}
static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
{
    (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict;
    return 0;   /* 0 => LZ4F_makeBlock() stores the block uncompressed */
}
static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_BlockCompressMode_e compressMode)
{
    if (compressMode == LZ4B_UNCOMPRESSED)
        return LZ4F_doNotCompressBlock;
    if (level < LZ4HC_CLEVEL_MIN) {
        if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
        return LZ4F_compressBlock_continue;
    }
    if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC;
    return LZ4F_compressBlockHC_continue;
}
/* Save history (up to 64KB) into @tmpBuff */
static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
{
    if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
        return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
    return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
}
typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;

static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } };
/*! LZ4F_compressUpdateImpl() :
 *  LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
 *  When successful, the function always entirely consumes @srcBuffer.
 *  src data is either buffered or compressed into @dstBuffer.
 *  If the block compression does not match the compression of the previous block, the old data is flushed
 *  and operations continue with the new compression mode.
 * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on.
 * @compressOptionsPtr is optional : provide NULL to mean "default".
 * @return : the number of bytes written into dstBuffer (it can be zero, meaning input data was just buffered),
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 *  After an error, the state is left in an undefined state, and must be re-initialized.
 */
static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
                                      void* dstBuffer, size_t dstCapacity,
                                const void* srcBuffer, size_t srcSize,
                                const LZ4F_compressOptions_t* compressOptionsPtr,
                                      LZ4F_BlockCompressMode_e blockCompression)
{
    size_t const blockSize = cctxPtr->maxBlockSize;
    const BYTE* srcPtr = (const BYTE*)srcBuffer;
    const BYTE* const srcEnd = srcPtr + srcSize;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
    LZ4F_lastBlockStatus lastBlockCompressed = notDone;
    compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression);
    size_t bytesWritten;
    DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);

    RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);   /* state must be initialized and waiting for next block */
    if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
        RETURN_ERROR(dstMaxSize_tooSmall);

    if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize)
        RETURN_ERROR(dstMaxSize_tooSmall);

    /* flush currently written block, to continue with new block compression */
    if (cctxPtr->blockCompressMode != blockCompression) {
        bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
        dstPtr += bytesWritten;
        cctxPtr->blockCompressMode = blockCompression;
    }

    if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull;

    /* complete tmp buffer */
    if (cctxPtr->tmpInSize > 0) {   /* some data already within tmp buffer */
        size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
        assert(blockSize > cctxPtr->tmpInSize);
        if (sizeToCopy > srcSize) {
            /* add src to tmpIn buffer */
            memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
            srcPtr = srcEnd;
            cctxPtr->tmpInSize += srcSize;
            /* still needs some CRC */
        } else {
            /* complete tmpIn block and then compress it */
            lastBlockCompressed = fromTmpBuffer;
            memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
            srcPtr += sizeToCopy;

            dstPtr += LZ4F_makeBlock(dstPtr,
                                     cctxPtr->tmpIn, blockSize,
                                     compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                                     cctxPtr->cdict,
                                     cctxPtr->prefs.frameInfo.blockChecksumFlag);
            if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
            cctxPtr->tmpInSize = 0;
        }
    }

    while ((size_t)(srcEnd - srcPtr) >= blockSize) {
        /* compress full blocks */
        lastBlockCompressed = fromSrcBuffer;
        dstPtr += LZ4F_makeBlock(dstPtr,
                                 srcPtr, blockSize,
                                 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                                 cctxPtr->cdict,
                                 cctxPtr->prefs.frameInfo.blockChecksumFlag);
        srcPtr += blockSize;
    }

    if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
        /* autoFlush : remaining input (< blockSize) is compressed */
        lastBlockCompressed = fromSrcBuffer;
        dstPtr += LZ4F_makeBlock(dstPtr,
                                 srcPtr, (size_t)(srcEnd - srcPtr),
                                 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                                 cctxPtr->cdict,
                                 cctxPtr->prefs.frameInfo.blockChecksumFlag);
        srcPtr = srcEnd;
    }

    /* preserve dictionary within @tmpBuff whenever necessary */
    if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
        /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */
        assert(blockCompression == LZ4B_COMPRESSED);
        if (compressOptionsPtr->stableSrc) {
            cctxPtr->tmpIn = cctxPtr->tmpBuff;  /* src is stable : dictionary remains in src across invocations */
        } else {
            int const realDictSize = LZ4F_localSaveDict(cctxPtr);
            assert(0 <= realDictSize && realDictSize <= 64 KB);
            cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
        }
    }

    /* keep tmpIn within limits */
    if (!(cctxPtr->prefs.autoFlush)   /* no autoflush : there may be some data left within internal buffer */
      && ((cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) )   /* not enough room to store next block */
    {
        /* only preserve 64KB within internal buffer. Ensures there is enough room for next block.
         * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */
        int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
        assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize));
    }

    /* some input data left, necessarily < blockSize */
    if (srcPtr < srcEnd) {
        /* fill tmp buffer */
        size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
        memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
        cctxPtr->tmpInSize = sizeToCopy;
    }

    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
        (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);

    cctxPtr->totalInSize += srcSize;
    return (size_t)(dstPtr - dstStart);
}
/*! LZ4F_compressUpdate() :
 *  LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
 *  When successful, the function always entirely consumes @srcBuffer.
 *  src data is either buffered or compressed into @dstBuffer.
 *  If an uncompressed block was written previously, buffered data is flushed
 *  before compressed data is appended.
 * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
 * @compressOptionsPtr is optional : provide NULL to mean "default".
 * @return : the number of bytes written into dstBuffer (it can be zero, meaning input data was just buffered),
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 *  After an error, the state is left in an undefined state, and must be re-initialized.
 */
size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
                           void* dstBuffer, size_t dstCapacity,
                     const void* srcBuffer, size_t srcSize,
                     const LZ4F_compressOptions_t* compressOptionsPtr)
{
    return LZ4F_compressUpdateImpl(cctxPtr,
                                   dstBuffer, dstCapacity,
                                   srcBuffer, srcSize,
                                   compressOptionsPtr, LZ4B_COMPRESSED);
}
/*! LZ4F_uncompressedUpdate() :
 *  Same as LZ4F_compressUpdate(), but requests blocks to be sent uncompressed.
 *  This symbol is only supported when LZ4F_blockIndependent is used.
 * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
 * @compressOptionsPtr is optional : provide NULL to mean "default".
 * @return : the number of bytes written into dstBuffer (it can be zero, meaning input data was just buffered),
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 *  After an error, the state is left in an undefined state, and must be re-initialized.
 */
size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
                               void* dstBuffer, size_t dstCapacity,
                         const void* srcBuffer, size_t srcSize,
                         const LZ4F_compressOptions_t* compressOptionsPtr)
{
    return LZ4F_compressUpdateImpl(cctxPtr,
                                   dstBuffer, dstCapacity,
                                   srcBuffer, srcSize,
                                   compressOptionsPtr, LZ4B_UNCOMPRESSED);
}
/*! LZ4F_flush() :
 *  When compressed data must be sent immediately, without waiting for a block to be filled,
 *  invoke LZ4F_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
 *  The result of the function is the number of bytes written into dstBuffer.
 *  It can be zero, which means there was no data left within LZ4F_cctx.
 *  The function outputs an error code if it fails (can be tested using LZ4F_isError())
 *  LZ4F_compressOptions_t* is optional. NULL is a valid argument.
 */
size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
                  void* dstBuffer, size_t dstCapacity,
            const LZ4F_compressOptions_t* compressOptionsPtr)
{
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
    compressFunc_t compress;

    if (cctxPtr->tmpInSize == 0) return 0;   /* nothing to flush */
    RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);
    RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall);
    (void)compressOptionsPtr;   /* not useful (yet) */

    /* select compression function */
    compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompressMode);

    /* compress tmp buffer */
    dstPtr += LZ4F_makeBlock(dstPtr,
                             cctxPtr->tmpIn, cctxPtr->tmpInSize,
                             compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                             cctxPtr->cdict,
                             cctxPtr->prefs.frameInfo.blockChecksumFlag);
    assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity));

    if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
        cctxPtr->tmpIn += cctxPtr->tmpInSize;
    cctxPtr->tmpInSize = 0;

    /* keep tmpIn within limits */
    if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) {  /* necessarily LZ4F_blockLinked */
        int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
    }

    return (size_t)(dstPtr - dstStart);
}
/*! LZ4F_compressEnd() :
 *  When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
 *  It will flush whatever data remained within compressionContext (like LZ4F_flush())
 *  but also properly finalize the frame, with an endMark and an (optional) checksum.
 *  LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
 * @return : the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
 *           or an error code if it fails (can be tested using LZ4F_isError())
 *  The context can then be used again to compress a new frame, starting with LZ4F_compressBegin().
 */
size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
                        void* dstBuffer, size_t dstCapacity,
                  const LZ4F_compressOptions_t* compressOptionsPtr)
{
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;

    size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
    DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
    FORWARD_IF_ERROR(flushSize);
    dstPtr += flushSize;

    assert(flushSize <= dstCapacity);
    dstCapacity -= flushSize;

    RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall);
    LZ4F_writeLE32(dstPtr, 0);
    dstPtr += 4;   /* endMark */

    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
        U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
        RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
        DEBUGLOG(5,"Writing 32-bit content checksum (0x%0X)", xxh);
        LZ4F_writeLE32(dstPtr, xxh);
        dstPtr+=4;   /* content Checksum */
    }

    cctxPtr->cStage = 0;   /* state is now re-usable (with identical preferences) */

    if (cctxPtr->prefs.frameInfo.contentSize) {
        if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
            RETURN_ERROR(frameSize_wrong);
    }

    return (size_t)(dstPtr - dstStart);
}
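/* End-to-end streaming compression sketch (illustrative; read_chunk /
 * write_out stand for any application I/O, and CHUNK_SIZE is the
 * application's read size). The dst buffer is sized once with
 * LZ4F_compressBound(CHUNK_SIZE, &prefs), which also covers the final
 * LZ4F_compressEnd() call.
 *
 *     size_t n;
 *     size_t r = LZ4F_compressBegin(cctx, dst, dstCapacity, &prefs);
 *     if (LZ4F_isError(r)) goto error;
 *     write_out(dst, r);                       // frame header
 *     while ((n = read_chunk(srcBuf, CHUNK_SIZE)) > 0) {
 *         r = LZ4F_compressUpdate(cctx, dst, dstCapacity, srcBuf, n, NULL);
 *         if (LZ4F_isError(r)) goto error;
 *         write_out(dst, r);                   // r may be 0 if input was only buffered
 *     }
 *     r = LZ4F_compressEnd(cctx, dst, dstCapacity, NULL);   // endMark + optional checksum
 *     if (LZ4F_isError(r)) goto error;
 *     write_out(dst, r);
 */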
/*-***************************************************
*   Frame Decompression
*****************************************************/

typedef enum {
    dstage_getFrameHeader=0, dstage_storeFrameHeader,
    dstage_init,
    dstage_getBlockHeader, dstage_storeBlockHeader,
    dstage_copyDirect, dstage_getBlockChecksum,
    dstage_getCBlock, dstage_storeCBlock,
    dstage_flushOut,
    dstage_getSuffix, dstage_storeSuffix,
    dstage_getSFrameSize, dstage_storeSFrameSize,
    dstage_skipSkippable
} dStage_t;

struct LZ4F_dctx_s {
    LZ4F_CustomMem cmem;
    LZ4F_frameInfo_t frameInfo;
    U32    version;
    dStage_t dStage;
    U64    frameRemainingSize;
    size_t maxBlockSize;
    size_t maxBufferSize;
    BYTE*  tmpIn;
    size_t tmpInSize;
    size_t tmpInTarget;
    BYTE*  tmpOutBuffer;
    const BYTE* dict;
    size_t dictSize;
    BYTE*  tmpOut;
    size_t tmpOutSize;
    size_t tmpOutStart;
    XXH32_state_t xxh;
    XXH32_state_t blockChecksum;
    int    skipChecksum;
    BYTE   header[LZ4F_HEADER_SIZE_MAX];
};  /* typedef'd to LZ4F_dctx in lz4frame.h */
LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
{
    LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem);
    if (dctx == NULL) return NULL;

    dctx->cmem = customMem;
    dctx->version = version;
    return dctx;
}
/*! LZ4F_createDecompressionContext() :
 *  Create a decompressionContext object, which will track all decompression operations.
 *  Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
 *  Object can later be released using LZ4F_freeDecompressionContext().
 * @return : if != 0, there was an error during context creation.
 */
LZ4F_errorCode_t
LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
{
    assert(LZ4F_decompressionContextPtr != NULL);  /* violation of narrow contract */
    RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null);  /* in case it nonetheless happens in production */

    *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber);
    if (*LZ4F_decompressionContextPtr == NULL) {  /* failed allocation */
        RETURN_ERROR(allocation_failed);
    }
    return LZ4F_OK_NoError;
}
LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
{
    LZ4F_errorCode_t result = LZ4F_OK_NoError;
    if (dctx != NULL) {   /* can accept NULL input, like free() */
        result = (LZ4F_errorCode_t)dctx->dStage;
        LZ4F_free(dctx->tmpIn, dctx->cmem);
        LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
        LZ4F_free(dctx, dctx->cmem);
    }
    return result;
}
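/* Streaming decompression sketch (illustrative; read_chunk / consume_output
 * stand for any application I/O, srcBuf/dstBuf are application buffers).
 * LZ4F_decompress() is called until it returns 0, which signals a fully
 * decoded frame.
 *
 *     LZ4F_dctx* dctx = NULL;
 *     size_t hint = LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION);
 *     if (LZ4F_isError(hint)) return -1;
 *     for (;;) {
 *         size_t const srcSize = read_chunk(srcBuf, SRC_BUF_SIZE);
 *         size_t srcPos = 0;
 *         if (srcSize == 0) break;              // end of input
 *         while (srcPos < srcSize) {
 *             size_t dstSize = DST_BUF_SIZE;    // in: capacity, out: bytes written
 *             size_t inSize  = srcSize - srcPos;// in: available, out: bytes consumed
 *             hint = LZ4F_decompress(dctx, dstBuf, &dstSize,
 *                                    srcBuf + srcPos, &inSize, NULL);
 *             if (LZ4F_isError(hint)) goto error;
 *             consume_output(dstBuf, dstSize);
 *             srcPos += inSize;
 *             if (hint == 0) break;             // frame fully decoded
 *         }
 *     }
 *     LZ4F_freeDecompressionContext(dctx);
 */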
/*==---   Streaming Decompression operations   ---==*/
void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
{
    DEBUGLOG(5, "LZ4F_resetDecompressionContext");
    dctx->dStage = dstage_getFrameHeader;
    dctx->dict = NULL;
    dctx->dictSize = 0;
    dctx->skipChecksum = 0;
    dctx->frameRemainingSize = 0;
}
/*! LZ4F_decodeHeader() :
 *  input   : `src` points at the **beginning of the frame**
 *  output  : set internal values of dctx, such as
 *            dctx->frameInfo and dctx->dStage.
 *            Also allocates internal buffers.
 * @return : nb Bytes read from src (necessarily <= srcSize)
 *           or an error code (testable with LZ4F_isError())
 */
static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize)
{
    unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID;
    size_t frameHeaderSize;
    const BYTE* srcPtr = (const BYTE*)src;

    DEBUGLOG(5, "LZ4F_decodeHeader");
    /* need to decode header to get frameInfo */
    RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete);   /* minimal frame header size */
    MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));

    /* special case : skippable frames */
    if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
        dctx->frameInfo.frameType = LZ4F_skippableFrame;
        if (src == (void*)(dctx->header)) {
            dctx->tmpInSize = srcSize;
            dctx->tmpInTarget = 8;
            dctx->dStage = dstage_storeSFrameSize;
            return srcSize;
        } else {
            dctx->dStage = dstage_getSFrameSize;
            return 4;
        }
    }

    /* control magic number */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
        DEBUGLOG(4, "frame header error : unknown magic number");
        RETURN_ERROR(frameType_unknown);
    }
#endif
    dctx->frameInfo.frameType = LZ4F_frame;

    /* Flags */
    {   U32 const FLG = srcPtr[4];
        U32 const version = (FLG>>6) & _2BITS;
        blockChecksumFlag = (FLG>>4) & _1BIT;
        blockMode = (FLG>>5) & _1BIT;
        contentSizeFlag = (FLG>>3) & _1BIT;
        contentChecksumFlag = (FLG>>2) & _1BIT;
        dictIDFlag = FLG & _1BIT;
        /* validate */
        if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set);   /* Reserved bit */
        if (version != 1) RETURN_ERROR(headerVersion_wrong);         /* Version Number, only supported value */
    }
    DEBUGLOG(6, "contentSizeFlag: %u", contentSizeFlag);

    /* Frame Header Size */
    frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);

    if (srcSize < frameHeaderSize) {
        /* not enough input to fully decode frame header */
        if (srcPtr != dctx->header)
            memcpy(dctx->header, srcPtr, srcSize);
        dctx->tmpInSize = srcSize;
        dctx->tmpInTarget = frameHeaderSize;
        dctx->dStage = dstage_storeFrameHeader;
        return srcSize;
    }

    {   U32 const BD = srcPtr[5];
        blockSizeID = (BD>>4) & _3BITS;
        /* validate */
        if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set);   /* Reserved bit */
        if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid);    /* 4-7 only supported values for the time being */
        if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set);  /* Reserved bits */
    }

    /* check header checksum */
    assert(frameHeaderSize > 5);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    {   BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
        RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid);
    }
#endif

    /* save frame parameters */
    dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
    dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
    dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
    dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
    dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
    if (contentSizeFlag) {
        dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
    }
    if (dictIDFlag)
        dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);

    dctx->dStage = dstage_init;

    return frameHeaderSize;
}
/*! LZ4F_headerSize() :
 * @return : size of frame header
 *           or an error code, which can be tested using LZ4F_isError()
 */
size_t LZ4F_headerSize(const void* src, size_t srcSize)
{
    RETURN_ERROR_IF(src == NULL, srcPtr_wrong);

    /* minimal srcSize to determine header size */
    if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
        RETURN_ERROR(frameHeader_incomplete);

    /* special case : skippable frames */
    if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START)
        return 8;

    /* control magic number */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
        RETURN_ERROR(frameType_unknown);
#endif

    /* Frame Header Size */
    {   BYTE const FLG = ((const BYTE*)src)[4];
        U32 const contentSizeFlag = (FLG>>3) & _1BIT;
        U32 const dictIDFlag = FLG & _1BIT;
        return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
    }
}
/*! LZ4F_getFrameInfo() :
 *  This function extracts frame parameters (max blockSize, frame checksum, etc.).
 *  Usage is optional. Objective is to provide relevant information for allocation purposes.
 *  This function works in 2 situations :
 *   - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
 *     Amount of input data provided must be large enough to successfully decode the frame header.
 *     A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum.
 *   - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx.
 *  The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
 *  Decompression must resume from (srcBuffer + *srcSizePtr).
 * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
 *           or an error code which can be tested using LZ4F_isError()
 *  note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
 *  note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
 */
LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
                                   LZ4F_frameInfo_t* frameInfoPtr,
                                   const void* srcBuffer, size_t* srcSizePtr)
{
    LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
    if (dctx->dStage > dstage_storeFrameHeader) {
        /* frameInfo already decoded */
        size_t o=0, i=0;
        *srcSizePtr = 0;
        *frameInfoPtr = dctx->frameInfo;
        /* returns : recommended nb of bytes for LZ4F_decompress() */
        return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL);
    } else {
        if (dctx->dStage == dstage_storeFrameHeader) {
            /* frame decoding already started, in the middle of header => automatic fail */
            *srcSizePtr = 0;
            RETURN_ERROR(frameDecoding_alreadyStarted);
        } else {
            size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
            if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
            if (*srcSizePtr < hSize) {
                *srcSizePtr=0;
                RETURN_ERROR(frameHeader_incomplete);
            }

            {   size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
                if (LZ4F_isError(decodeResult)) {
                    *srcSizePtr = 0;
                } else {
                    *srcSizePtr = decodeResult;
                    decodeResult = BHSize;   /* block header size */
                }
                *frameInfoPtr = dctx->frameInfo;
                return decodeResult;
    }   }   }
}
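/* Typical use before allocating decompression buffers (illustrative) :
 * decode just the header with LZ4F_getFrameInfo(), then size the output
 * buffer from the advertised maximum block size.
 *
 *     LZ4F_frameInfo_t info;
 *     size_t consumed = srcSize;                        // in: available, out: consumed
 *     size_t const hint = LZ4F_getFrameInfo(dctx, &info, srcBuf, &consumed);
 *     if (LZ4F_isError(hint)) return -1;
 *     {   size_t const outCapacity = LZ4F_getBlockSize(info.blockSizeID);
 *         // decompression then resumes from srcBuf + consumed
 *     }
 */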
/* LZ4F_updateDict() :
 * only used for LZ4F_blockLinked mode
 * Condition : @dstPtr != NULL
 */
static void LZ4F_updateDict(LZ4F_dctx* dctx,
                      const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
                      unsigned withinTmp)
{
    assert(dstPtr != NULL);
    if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr;   /* will lead to prefix mode */
    assert(dctx->dict != NULL);

    if (dctx->dict + dctx->dictSize == dstPtr) {  /* prefix mode, everything within dstBuffer */
        dctx->dictSize += dstSize;
        return;
    }

    assert(dstPtr >= dstBufferStart);
    if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) {  /* history in dstBuffer becomes large enough to become dictionary */
        dctx->dict = (const BYTE*)dstBufferStart;
        dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
        return;
    }

    assert(dstSize < 64 KB);   /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */

    /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
    assert(dctx->tmpOutBuffer != NULL);

    if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) {   /* continue history within tmpOutBuffer */
        /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
        assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
        dctx->dictSize += dstSize;
        return;
    }

    if (withinTmp) {  /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
        size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
        size_t copySize = 64 KB - dctx->tmpOutSize;
        const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
        if (dctx->tmpOutSize > 64 KB) copySize = 0;
        if (copySize > preserveSize) copySize = preserveSize;

        memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);

        dctx->dict = dctx->tmpOutBuffer;
        dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
        return;
    }

    if (dctx->dict == dctx->tmpOutBuffer) {  /* copy dst into tmp to complete dict */
        if (dctx->dictSize + dstSize > dctx->maxBufferSize) {  /* tmp buffer not large enough */
            size_t const preserveSize = 64 KB - dstSize;
            memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
            dctx->dictSize = preserveSize;
        }
        memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
        dctx->dictSize += dstSize;
        return;
    }

    /* join dict & dest into tmp */
    {   size_t preserveSize = 64 KB - dstSize;
        if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
        memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
        memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
        dctx->dict = dctx->tmpOutBuffer;
        dctx->dictSize = preserveSize + dstSize;
    }
}
/*! LZ4F_decompress() :
 *  Call this function repetitively to regenerate the data compressed within srcBuffer.
 *  The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
 *  into dstBuffer of capacity *dstSizePtr.
 *
 *  The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
 *
 *  The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
 *  If the number of bytes read is < the number of bytes provided, then the decompression operation is not complete.
 *  Remaining data will have to be presented again in a subsequent invocation.
 *
 *  The function result is a hint of the better srcSize to use for the next call to LZ4F_decompress.
 *  Schematically, it's the size of the current (or remaining) compressed block + header of next block.
 *  Respecting the hint provides a small boost to performance, since it allows less buffer shuffling.
 *  Note that this is just a hint; it's always possible to provide any srcSize value.
 *  When a frame is fully decoded, @return will be 0.
 *  If decompression failed, @return is an error code which can be tested using LZ4F_isError().
 */
size_t LZ4F_decompress(LZ4F_dctx* dctx,
                       void* dstBuffer, size_t* dstSizePtr,
                       const void* srcBuffer, size_t* srcSizePtr,
                       const LZ4F_decompressOptions_t* decompressOptionsPtr)
{
    LZ4F_decompressOptions_t optionsNull;
    const BYTE* const srcStart = (const BYTE*)srcBuffer;
    const BYTE* const srcEnd = srcStart + *srcSizePtr;
    const BYTE* srcPtr = srcStart;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
    BYTE* dstPtr = dstStart;
    const BYTE* selectedIn = NULL;
    unsigned doAnotherStage = 1;
    size_t nextSrcSizeHint = 1;

    DEBUGLOG(5, "LZ4F_decompress: src[%p](%u) => dst[%p](%u)",
            srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
    if (dstBuffer == NULL) assert(*dstSizePtr == 0);
    MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
    if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
    *srcSizePtr = 0;
    *dstSizePtr = 0;
    assert(dctx != NULL);
    dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0); /* once set, disable for the remainder of the frame */

    /* behaves as a state machine */

    while (doAnotherStage) {

        switch(dctx->dStage)
        {
        case dstage_getFrameHeader:
            DEBUGLOG(6, "dstage_getFrameHeader");
            if ((size_t)(srcEnd-srcPtr) >= maxFHSize) {  /* enough to decode - shortcut */
                size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr));  /* will update dStage appropriately */
                FORWARD_IF_ERROR(hSize);
                srcPtr += hSize;
                break;
            }
            dctx->tmpInSize = 0;
            if (srcEnd-srcPtr == 0) return minFHSize;   /* 0-size input */
            dctx->tmpInTarget = minFHSize;   /* minimum size to decode header */
            dctx->dStage = dstage_storeFrameHeader;
            /* fall-through */

        case dstage_storeFrameHeader:
            DEBUGLOG(6, "dstage_storeFrameHeader");
            {   size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
                memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                dctx->tmpInSize += sizeToCopy;
                srcPtr += sizeToCopy;
            }
            if (dctx->tmpInSize < dctx->tmpInTarget) {
                nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize;   /* rest of header + nextBlockHeader */
                doAnotherStage = 0;   /* not enough src data, ask for some more */
                break;
            }
            FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) );   /* will update dStage appropriately */
            break;

        case dstage_init:
            DEBUGLOG(6, "dstage_init");
            if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
            /* internal buffers allocation */
            {   size_t const bufferNeeded = dctx->maxBlockSize
                    + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB : 0);
                if (bufferNeeded > dctx->maxBufferSize) {   /* tmp buffers too small */
                    dctx->maxBufferSize = 0;   /* ensure allocation will be re-attempted on next entry*/
                    LZ4F_free(dctx->tmpIn, dctx->cmem);
                    dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem);
                    RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed);
                    LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
                    dctx->tmpOutBuffer = (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem);
                    RETURN_ERROR_IF(dctx->tmpOutBuffer == NULL, allocation_failed);
                    dctx->maxBufferSize = bufferNeeded;
            }   }
            dctx->tmpInSize = 0;
            dctx->tmpInTarget = 0;
            dctx->tmpOut = dctx->tmpOutBuffer;
            dctx->tmpOutStart = 0;
            dctx->tmpOutSize = 0;

            dctx->dStage = dstage_getBlockHeader;
            /* fall-through */

        case dstage_getBlockHeader:
            if ((size_t)(srcEnd - srcPtr) >= BHSize) {
                selectedIn = srcPtr;
                srcPtr += BHSize;
            } else {
                /* not enough input to read cBlockSize field */
                dctx->tmpInSize = 0;
                dctx->dStage = dstage_storeBlockHeader;
            }

            if (dctx->dStage == dstage_storeBlockHeader)   /* can be skipped */
        case dstage_storeBlockHeader:
            {   size_t const remainingInput = (size_t)(srcEnd - srcPtr);
                size_t const wantedData = BHSize - dctx->tmpInSize;
                size_t const sizeToCopy = MIN(wantedData, remainingInput);
                memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
                srcPtr += sizeToCopy;
                dctx->tmpInSize += sizeToCopy;
                if (dctx->tmpInSize < BHSize) {   /* not enough input for cBlockSize */
                    nextSrcSizeHint = BHSize - dctx->tmpInSize;
                    doAnotherStage  = 0;
                    break;
                }
                selectedIn = dctx->tmpIn;
            }   /* if (dctx->dStage == dstage_storeBlockHeader) */

            /* decode block header */
            {   U32 const blockHeader = LZ4F_readLE32(selectedIn);
                size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
                size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;
                if (blockHeader==0) {  /* frameEnd signal, no more block */
                    DEBUGLOG(5, "end of frame");
                    dctx->dStage = dstage_getSuffix;
                    break;
                }
                if (nextCBlockSize > dctx->maxBlockSize) {
                    RETURN_ERROR(maxBlockSize_invalid);
                }
                if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
                    /* next block is uncompressed */
                    dctx->tmpInTarget = nextCBlockSize;
                    DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
                    if (dctx->frameInfo.blockChecksumFlag) {
                        (void)XXH32_reset(&dctx->blockChecksum, 0);
                    }
                    dctx->dStage = dstage_copyDirect;
                    break;
                }
                /* next block is a compressed block */
                dctx->tmpInTarget = nextCBlockSize + crcSize;
                dctx->dStage = dstage_getCBlock;
                if (dstPtr==dstEnd || srcPtr==srcEnd) {
                    nextSrcSizeHint = BHSize + nextCBlockSize + crcSize;
                    doAnotherStage = 0;
                }
                break;
            }
        case dstage_copyDirect:   /* uncompressed block */
            DEBUGLOG(6, "dstage_copyDirect");
            {   size_t sizeToCopy;
                if (dstPtr == NULL) {
                    sizeToCopy = 0;
                } else {
                    size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
                    sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
                    memcpy(dstPtr, srcPtr, sizeToCopy);
                    if (!dctx->skipChecksum) {
                        if (dctx->frameInfo.blockChecksumFlag) {
                            (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
                        }
                        if (dctx->frameInfo.contentChecksumFlag)
                            (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
                    }
                    if (dctx->frameInfo.contentSize)
                        dctx->frameRemainingSize -= sizeToCopy;

                    /* history management (linked blocks only) */
                    if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
                        LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
                }   }

                srcPtr += sizeToCopy;
                dstPtr += sizeToCopy;
                if (sizeToCopy == dctx->tmpInTarget) {   /* all done */
                    if (dctx->frameInfo.blockChecksumFlag) {
                        dctx->tmpInSize = 0;
                        dctx->dStage = dstage_getBlockChecksum;
                    } else
                        dctx->dStage = dstage_getBlockHeader;  /* new block */
                    break;
                }
                dctx->tmpInTarget -= sizeToCopy;  /* need to copy more */
            }
            nextSrcSizeHint = dctx->tmpInTarget
                            + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
                            + BHSize /* next header size */;
            doAnotherStage = 0;
            break;

        /* check block checksum for recently transferred uncompressed block */
        case dstage_getBlockChecksum:
            DEBUGLOG(6, "dstage_getBlockChecksum");
            {   const void* crcSrc;
                if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
                    crcSrc = srcPtr;
                    srcPtr += 4;
                } else {
                    size_t const stillToCopy = 4 - dctx->tmpInSize;
                    size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr));
                    memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                    dctx->tmpInSize += sizeToCopy;
                    srcPtr += sizeToCopy;
                    if (dctx->tmpInSize < 4) {  /* all input consumed */
                        doAnotherStage = 0;
                        break;
                    }
                    crcSrc = dctx->header;
                }
                if (!dctx->skipChecksum) {
                    U32 const readCRC = LZ4F_readLE32(crcSrc);
                    U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
                    DEBUGLOG(6, "compare block checksum");
                    if (readCRC != calcCRC) {
                        DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
                                readCRC, calcCRC);
                        RETURN_ERROR(blockChecksum_invalid);
                    }
#else
                    (void)readCRC;
                    (void)calcCRC;
#endif
            }   }
            dctx->dStage = dstage_getBlockHeader;  /* new block */
            break;
        case dstage_getCBlock:
            DEBUGLOG(6, "dstage_getCBlock");
            if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
                dctx->tmpInSize = 0;
                dctx->dStage = dstage_storeCBlock;
                break;
            }
            /* input large enough to read full block directly */
            selectedIn = srcPtr;
            srcPtr += dctx->tmpInTarget;

            if (0)  /* always jump over next block */
        case dstage_storeCBlock:
            {   size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
                size_t const inputLeft = (size_t)(srcEnd-srcPtr);
                size_t const sizeToCopy = MIN(wantedData, inputLeft);
                memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
                dctx->tmpInSize += sizeToCopy;
                srcPtr += sizeToCopy;
                if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */
                    nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize)
                                    + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
                                    + BHSize /* next header size */;
                    doAnotherStage = 0;
                    break;
                }
                selectedIn = dctx->tmpIn;
            }

            /* At this stage, input is large enough to decode a block */

            /* First, decode and control block checksum if it exists */
            if (dctx->frameInfo.blockChecksumFlag) {
                assert(dctx->tmpInTarget >= 4);
                dctx->tmpInTarget -= 4;
                assert(selectedIn != NULL);  /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
                {   U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
                    U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
                    RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid);
#else
                    (void)readBlockCrc;
                    (void)calcBlockCrc;
#endif
            }   }

            /* decode directly into destination buffer if there is enough room */
            if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize)
                 /* unless the dictionary is stored in tmpOut:
                  * in which case it's faster to decode within tmpOut
                  * to benefit from prefix speedup */
              && !(dctx->dict != NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) )
            {
                const char* dict = (const char*)dctx->dict;
                size_t dictSize = dctx->dictSize;
                int decodedSize;
                assert(dstPtr != NULL);
                if (dict && dictSize > 1 GB) {
                    /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */
                    dict += dictSize - 64 KB;
                    dictSize = 64 KB;
                }
                decodedSize = LZ4_decompress_safe_usingDict(
                        (const char*)selectedIn, (char*)dstPtr,
                        (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
                        dict, (int)dictSize);
                RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
                if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum))
                    XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
                if (dctx->frameInfo.contentSize)
                    dctx->frameRemainingSize -= (size_t)decodedSize;

                /* dictionary management */
                if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
                    LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
                }

                dstPtr += decodedSize;
                dctx->dStage = dstage_getBlockHeader;  /* end of block, let's get another one */
                break;
            }

            /* not enough place into dst : decode into tmpOut */

            /* manage dictionary */
            if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
                if (dctx->dict == dctx->tmpOutBuffer) {
                    /* truncate dictionary to 64 KB if too big */
                    if (dctx->dictSize > 128 KB) {
                        memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB);
                        dctx->dictSize = 64 KB;
                    }
                    dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize;
                } else {  /* dict not within tmpOut */
                    size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB);
                    dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace;
            }   }

            /* Decode block into tmpOut */
            {   const char* dict = (const char*)dctx->dict;
                size_t dictSize = dctx->dictSize;
                int decodedSize;
                if (dict && dictSize > 1 GB) {
                    /* the dictSize param is an int, avoid truncation / sign issues */
                    dict += dictSize - 64 KB;
                    dictSize = 64 KB;
                }
                decodedSize = LZ4_decompress_safe_usingDict(
                        (const char*)selectedIn, (char*)dctx->tmpOut,
                        (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
                        dict, (int)dictSize);
                RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
                if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum)
                    XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize);
                if (dctx->frameInfo.contentSize)
                    dctx->frameRemainingSize -= (size_t)decodedSize;
                dctx->tmpOutSize = (size_t)decodedSize;
                dctx->tmpOutStart = 0;
                dctx->dStage = dstage_flushOut;
            }
            /* fall-through */
        case dstage_flushOut:  /* flush decoded data from tmpOut to dstBuffer */
            DEBUGLOG(6, "dstage_flushOut");
            if (dstPtr != NULL) {
                size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
                memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);

                /* dictionary management */
                if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
                    LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/);

                dctx->tmpOutStart += sizeToCopy;
                dstPtr += sizeToCopy;
            }
            if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
                dctx->dStage = dstage_getBlockHeader;  /* get next block */
                break;
            }
            /* could not flush everything : stop there, just request a block header */
            doAnotherStage = 0;
            nextSrcSizeHint = BHSize;
            break;

        case dstage_getSuffix:
            RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong);   /* incorrect frame size decoded */
            if (!dctx->frameInfo.contentChecksumFlag) {  /* no checksum, frame is completed */
                nextSrcSizeHint = 0;
                LZ4F_resetDecompressionContext(dctx);
                doAnotherStage = 0;
                break;
            }
            if ((srcEnd - srcPtr) < 4) {  /* not enough size for entire CRC */
                dctx->tmpInSize = 0;
                dctx->dStage = dstage_storeSuffix;
            } else {
                selectedIn = srcPtr;
                srcPtr += 4;
            }

            if (dctx->dStage == dstage_storeSuffix)   /* can be skipped */
        case dstage_storeSuffix:
            {   size_t const remainingInput = (size_t)(srcEnd - srcPtr);
                size_t const wantedData = 4 - dctx->tmpInSize;
                size_t const sizeToCopy = MIN(wantedData, remainingInput);
                memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
                srcPtr += sizeToCopy;
                dctx->tmpInSize += sizeToCopy;
                if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */
                    nextSrcSizeHint = 4 - dctx->tmpInSize;
                    doAnotherStage = 0;
                    break;
                }
                selectedIn = dctx->tmpIn;
            }   /* if (dctx->dStage == dstage_storeSuffix) */

        /* case dstage_checkSuffix: */   /* no direct entry, avoid initialization risks */
            if (!dctx->skipChecksum) {
                U32 const readCRC = LZ4F_readLE32(selectedIn);
                U32 const resultCRC = XXH32_digest(&(dctx->xxh));
                DEBUGLOG(4, "frame checksum: stored 0x%0X vs 0x%0X processed", readCRC, resultCRC);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
                RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid);
#else
                (void)readCRC;
                (void)resultCRC;
#endif
            }
            nextSrcSizeHint = 0;
            LZ4F_resetDecompressionContext(dctx);
            doAnotherStage = 0;
            break;
        case dstage_getSFrameSize:
            if ((srcEnd - srcPtr) >= 4) {
                selectedIn = srcPtr;
                srcPtr += 4;
            } else {
                /* not enough input to read sFrameSize field */
                dctx->tmpInSize = 4;
                dctx->tmpInTarget = 8;
                dctx->dStage = dstage_storeSFrameSize;
            }

            if (dctx->dStage == dstage_storeSFrameSize)
        case dstage_storeSFrameSize:
            {   size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize,
                                              (size_t)(srcEnd - srcPtr) );
                memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                srcPtr += sizeToCopy;
                dctx->tmpInSize += sizeToCopy;
                if (dctx->tmpInSize < dctx->tmpInTarget) {
                    /* not enough input to get full sBlockSize; wait for more */
                    nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize;
                    doAnotherStage = 0;
                    break;
                }
                selectedIn = dctx->header + 4;
            }   /* if (dctx->dStage == dstage_storeSFrameSize) */

        /* case dstage_decodeSFrameSize: */   /* no direct entry */
            {   size_t const SFrameSize = LZ4F_readLE32(selectedIn);
                dctx->frameInfo.contentSize = SFrameSize;
                dctx->tmpInTarget = SFrameSize;
                dctx->dStage = dstage_skipSkippable;
                break;
            }

        case dstage_skipSkippable:
            {   size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr));
                srcPtr += skipSize;
                dctx->tmpInTarget -= skipSize;
                doAnotherStage = 0;
                nextSrcSizeHint = dctx->tmpInTarget;
                if (nextSrcSizeHint) break;  /* still more to skip */
                /* frame fully skipped : prepare context for a new frame */
                LZ4F_resetDecompressionContext(dctx);
                break;
            }
        }   /* switch (dctx->dStage) */
    }   /* while (doAnotherStage) */

    /* preserve history within tmpOut whenever necessary */
    LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
    if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked)  /* next block will use up to 64KB from previous ones */
      && (dctx->dict != dctx->tmpOutBuffer)             /* dictionary is not already within tmp */
      && (dctx->dict != NULL)                           /* dictionary exists */
      && (!decompressOptionsPtr->stableDst)             /* cannot rely on dst data to remain there for next call */
      && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) )  /* valid stages : [init ... getSuffix[ */
    {
        if (dctx->dStage == dstage_flushOut) {
            size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
            size_t copySize = 64 KB - dctx->tmpOutSize;
            const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
            if (dctx->tmpOutSize > 64 KB) copySize = 0;
            if (copySize > preserveSize) copySize = preserveSize;
            assert(dctx->tmpOutBuffer != NULL);

            memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);

            dctx->dict = dctx->tmpOutBuffer;
            dctx->dictSize = preserveSize + dctx->tmpOutStart;
        } else {
            const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
            size_t const newDictSize = MIN(dctx->dictSize, 64 KB);

            memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);

            dctx->dict = dctx->tmpOutBuffer;
            dctx->dictSize = newDictSize;
            dctx->tmpOut = dctx->tmpOutBuffer + newDictSize;
        }
    }

    *srcSizePtr = (size_t)(srcPtr - srcStart);
    *dstSizePtr = (size_t)(dstPtr - dstStart);
    return nextSrcSizeHint;
}
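
/* Illustrative usage sketch (not part of the library) :
 * a typical driver loop for the state machine above. Each call consumes some input,
 * produces some output, and returns a hint for the next source size (0 once the
 * frame is complete). The sketch assumes dstCapacity is large enough for the whole
 * regenerated payload and that lz4frame.h is included; names are hypothetical.
 */
#if 0
static size_t example_decompressFrame(LZ4F_dctx* dctx,
                                      void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    const char* srcPtr = (const char*)src;
    const char* const srcEnd = srcPtr + srcSize;
    char* dstPtr = (char*)dst;
    char* const dstEnd = dstPtr + dstCapacity;
    size_t hint = 1;   /* any non-zero value enters the loop */

    while (hint != 0 && srcPtr < srcEnd) {
        size_t dstAvail = (size_t)(dstEnd - dstPtr);
        size_t srcAvail = (size_t)(srcEnd - srcPtr);
        hint = LZ4F_decompress(dctx, dstPtr, &dstAvail, srcPtr, &srcAvail, NULL);
        if (LZ4F_isError(hint)) return hint;
        /* on return, dstAvail = bytes written, srcAvail = bytes consumed */
        dstPtr += dstAvail;
        srcPtr += srcAvail;
    }
    return (size_t)(dstPtr - (char*)dst);   /* total regenerated size */
}
#endif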
/*! LZ4F_decompress_usingDict() :
 *  Same as LZ4F_decompress(), using a predefined dictionary.
 *  Dictionary is used "in place", without any preprocessing.
 *  It must remain accessible throughout the entire frame decoding.
 */
size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx,
                       void* dstBuffer, size_t* dstSizePtr,
                       const void* srcBuffer, size_t* srcSizePtr,
                       const void* dict, size_t dictSize,
                       const LZ4F_decompressOptions_t* decompressOptionsPtr)
{
    if (dctx->dStage <= dstage_init) {
        dctx->dict = (const BYTE*)dict;
        dctx->dictSize = dictSize;
    }
    return LZ4F_decompress(dctx, dstBuffer, dstSizePtr,
                           srcBuffer, srcSizePtr,
                           decompressOptionsPtr);
}
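
/* Illustrative usage sketch (not part of the library) :
 * decoding a frame produced against an external dictionary. The same dictionary
 * buffer must be supplied on every call and stay valid until the frame is fully
 * decoded. Calling convention is otherwise identical to LZ4F_decompress().
 * Names are hypothetical.
 */
#if 0
static size_t example_decompressChunkWithDict(LZ4F_dctx* dctx,
                                              void* dst, size_t* dstSizePtr,
                                              const void* src, size_t* srcSizePtr,
                                              const void* dictBuffer, size_t dictSize)
{
    /* *dstSizePtr / *srcSizePtr are in-out : capacities on entry, amounts produced/consumed on return */
    return LZ4F_decompress_usingDict(dctx, dst, dstSizePtr,
                                     src, srcSizePtr,
                                     dictBuffer, dictSize, NULL);
}
#endif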