/**
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of https://github.com/facebook/zstd.
 * An additional grant of patent rights can be found in the PATENTS file in the
 * same directory.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation. This program is dual-licensed; you may select
 * either version 2 of the GNU General Public License ("GPL") or BSD license
 * ("BSD").
 */
/*-*************************************
*  Dependencies
***************************************/
#include "zstd_internal.h" /* includes zstd.h */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h> /* memset */
/*-*************************************
*  Constants
***************************************/
static const U32 g_searchStrength = 8; /* control skip over incompressible data */
#define HASH_READ_SIZE 8
typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
/*-*************************************
*  Helper functions
***************************************/
size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
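/*
 * Commentary (not in the original): ZSTD_compressBound() gives a worst-case
 * output size, so a caller can size a destination buffer that always
 * suffices, e.g.:
 *
 *	size_t const dstCapacity = ZSTD_compressBound(srcSize);
 *
 * The extra 12 bytes appear to cover frame and block header overhead on top
 * of FSE_compressBound()'s own margin.
 */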
/*-*************************************
*  Sequence storage
***************************************/
static void ZSTD_resetSeqStore(seqStore_t *ssPtr)
{
	ssPtr->lit = ssPtr->litStart;
	ssPtr->sequences = ssPtr->sequencesStart;
	ssPtr->longLengthID = 0;
}
/*-*************************************
*  Context memory management
***************************************/
struct ZSTD_CCtx_s {
	const BYTE *nextSrc;  /* next block here to continue on curr prefix */
	const BYTE *base;     /* All regular indexes relative to this position */
	const BYTE *dictBase; /* extDict indexes relative to this position */
	U32 dictLimit;	      /* below that point, need extDict */
	U32 lowLimit;	      /* below that point, no more data */
	U32 nextToUpdate;     /* index from which to continue dictionary update */
	U32 nextToUpdate3;    /* index from which to continue dictionary update */
	U32 hashLog3;	      /* dispatch table : larger == faster, more memory */
	U32 loadedDictEnd;    /* index of end of dictionary */
	U32 forceWindow;      /* force back-references to respect limit of 1<<wLog, even for dictionary */
	U32 forceRawDict;     /* Force loading dictionary in "content-only" mode (no header analysis) */
	ZSTD_compressionStage_e stage;
	U32 rep[ZSTD_REP_NUM];
	U32 repToConfirm[ZSTD_REP_NUM];
	U32 dictID;
	ZSTD_parameters params;
	void *workSpace;
	size_t workSpaceSize;
	size_t blockSize;
	U64 frameContentSize;
	struct xxh64_state xxhState;
	ZSTD_customMem customMem;

	seqStore_t seqStore; /* sequences storage ptrs */
	U32 *hashTable;
	U32 *hashTable3;
	U32 *chainTable;
	HUF_CElt *hufTable;
	U32 flagStaticTables;
	HUF_repeat flagStaticHufTable;
	FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
	FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
	FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
	unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32];
};
size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams)
{
	size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
	U32 const divider = (cParams.searchLength == 3) ? 3 : 4;
	size_t const maxNbSeq = blockSize / divider;
	size_t const tokenSpace = blockSize + 11 * maxNbSeq;
	size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
	size_t const hSize = ((size_t)1) << cParams.hashLog;
	U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
	size_t const h3Size = ((size_t)1) << hashLog3;
	size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
	size_t const optSpace =
	    ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
	size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
				     (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);

	return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize);
}
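/*
 * Commentary (not in the original), a worked sizing example: for
 * fast-strategy parameters windowLog = 19, hashLog = 14, searchLength = 5,
 * and assuming ZSTD_BLOCKSIZE_ABSOLUTEMAX = 128 KB as in this port:
 * blockSize = 131072, chainSize = 0, hSize = 1 << 14, h3Size = 1, so
 * tableSpace is about 64 KB, while maxNbSeq = 131072 / 4 = 32768 gives
 * tokenSpace = 131072 + 11 * 32768 = 491520 bytes; token storage dominates
 * the workspace for the fast strategies.
 */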
static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
	ZSTD_CCtx *cctx;

	if (!customMem.customAlloc || !customMem.customFree)
		return NULL;

	cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
	if (!cctx)
		return NULL;
	memset(cctx, 0, sizeof(ZSTD_CCtx));
	cctx->customMem = customMem;
	return cctx;
}
ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize)
{
	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
	ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem);
	if (cctx) {
		cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize);
	}
	return cctx;
}
size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx)
{
	if (cctx == NULL)
		return 0; /* support free on NULL */
	ZSTD_free(cctx->workSpace, cctx->customMem);
	ZSTD_free(cctx, cctx->customMem);
	return 0; /* reserved as a potential error code in the future */
}
const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); }

static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; }
/** ZSTD_checkCParams() :
	ensure param values remain within authorized range.
	@return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
#define CLAMPCHECK(val, min, max)                                       \
	{                                                               \
		if ((val < min) | (val > max))                          \
			return ERROR(compressionParameter_unsupported); \
	}
	CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
	CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
	CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
	CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
	CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
	CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
	if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2)
		return ERROR(compressionParameter_unsupported);
	return 0;
}
/** ZSTD_cycleLog() :
 *  condition for correct operation : hashLog > 1 */
static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
{
	U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
	return hashLog - btScale;
}
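/*
 * Commentary (not in the original): strategies from ZSTD_btlazy2 upward use
 * the chain table as a binary tree with two slots (smaller/larger child)
 * per position (see ZSTD_insertBt1() below), so the number of positions it
 * can cycle through is half the table size; btScale subtracts 1 from the
 * log accordingly.
 */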
/** ZSTD_adjustCParams() :
	optimize `cPar` for a given input (`srcSize` and `dictSize`).
	mostly downsizing to reduce memory consumption and initialization.
	Both `srcSize` and `dictSize` are optional (use 0 if unknown),
	but if both are 0, no optimization can be done.
	Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that. */
ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
{
	if (srcSize + dictSize == 0)
		return cPar; /* no size information available : no adjustment */

	/* resize params, to use less memory when necessary */
	{
		U32 const minSrcSize = (srcSize == 0) ? 500 : 0;
		U64 const rSize = srcSize + dictSize + minSrcSize;
		if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) {
			U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
			if (cPar.windowLog > srcLog)
				cPar.windowLog = srcLog;
		}
	}
	if (cPar.hashLog > cPar.windowLog)
		cPar.hashLog = cPar.windowLog;
	{
		U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
		if (cycleLog > cPar.windowLog)
			cPar.chainLog -= (cycleLog - cPar.windowLog);
	}

	if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
		cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */

	return cPar;
}
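/*
 * Commentary (not in the original), a worked example: for srcSize = 10 KB
 * with no dictionary, rSize = 10240 and
 * srcLog = ZSTD_highbit32(10239) + 1 = 14, so a requested windowLog of 17
 * would be cut down to 14, shrinking every window-scaled table.
 */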
static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
{
	return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) &
	       (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3));
}
/*! ZSTD_continueCCtx() :
	reuse CCtx without reset (note : requires no dictionary) */
static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize)
{
	U32 const end = (U32)(cctx->nextSrc - cctx->base);
	cctx->params = params;
	cctx->frameContentSize = frameContentSize;
	cctx->lowLimit = end;
	cctx->dictLimit = end;
	cctx->nextToUpdate = end + 1;
	cctx->stage = ZSTDcs_init;
	cctx->dictID = 0;
	cctx->loadedDictEnd = 0;
	{
		int i;
		for (i = 0; i < ZSTD_REP_NUM; i++)
			cctx->rep[i] = repStartValue[i];
	}
	cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
	xxh64_reset(&cctx->xxhState, 0);
	return 0;
}

typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
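/*
 * Commentary (not in the original): the three policies describe how much
 * state a reset preserves. ZSTDcrp_continue reuses the context wholesale
 * when parameters are equivalent; ZSTDcrp_noMemset re-lays-out the
 * workspace without zeroing the match tables (ZSTD_copyCCtx() relies on
 * this before overwriting them); ZSTDcrp_fullReset also clears the tables.
 */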
/*! ZSTD_resetCCtx_advanced() :
	note : `params` must be validated */
static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp)
{
	if (crp == ZSTDcrp_continue)
		if (ZSTD_equivalentParams(params, zc->params)) {
			zc->flagStaticTables = 0;
			zc->flagStaticHufTable = HUF_repeat_none;
			return ZSTD_continueCCtx(zc, params, frameContentSize);
		}

	{
		size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
		U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4;
		size_t const maxNbSeq = blockSize / divider;
		size_t const tokenSpace = blockSize + 11 * maxNbSeq;
		size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
		size_t const hSize = ((size_t)1) << params.cParams.hashLog;
		U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
		size_t const h3Size = ((size_t)1) << hashLog3;
		size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
		void *ptr;

		/* Check if workSpace is large enough, alloc a new one if needed */
		{
			size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) +
						(ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
			size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
						   (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
			if (zc->workSpaceSize < neededSpace) {
				ZSTD_free(zc->workSpace, zc->customMem);
				zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
				if (zc->workSpace == NULL)
					return ERROR(memory_allocation);
				zc->workSpaceSize = neededSpace;
			}
		}

		if (crp != ZSTDcrp_noMemset)
			memset(zc->workSpace, 0, tableSpace); /* reset tables only */
		xxh64_reset(&zc->xxhState, 0);
		zc->hashLog3 = hashLog3;
		zc->hashTable = (U32 *)(zc->workSpace);
		zc->chainTable = zc->hashTable + hSize;
		zc->hashTable3 = zc->chainTable + chainSize;
		ptr = zc->hashTable3 + h3Size;
		zc->hufTable = (HUF_CElt *)ptr;
		zc->flagStaticTables = 0;
		zc->flagStaticHufTable = HUF_repeat_none;
		ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */

		zc->nextToUpdate = 1;
		zc->nextSrc = NULL;
		zc->base = NULL;
		zc->dictBase = NULL;
		zc->dictLimit = 0;
		zc->lowLimit = 0;
		zc->params = params;
		zc->blockSize = blockSize;
		zc->frameContentSize = frameContentSize;

		{
			int i;
			for (i = 0; i < ZSTD_REP_NUM; i++)
				zc->rep[i] = repStartValue[i];
		}

		if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
			zc->seqStore.litFreq = (U32 *)ptr;
			zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits);
			zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1);
			zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1);
			ptr = zc->seqStore.offCodeFreq + (MaxOff + 1);
			zc->seqStore.matchTable = (ZSTD_match_t *)ptr;
			ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1;
			zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr;
			ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1;
			zc->seqStore.litLengthSum = 0;
		}
		zc->seqStore.sequencesStart = (seqDef *)ptr;
		ptr = zc->seqStore.sequencesStart + maxNbSeq;
		zc->seqStore.llCode = (BYTE *)ptr;
		zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
		zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
		zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;

		zc->stage = ZSTDcs_init;
		zc->dictID = 0;
		zc->loadedDictEnd = 0;

		return 0;
	}
}
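/*
 * Commentary (not in the original): the carving above fixes the workspace
 * layout as
 *	[hashTable | chainTable | hashTable3 | hufTable (256 U32s)
 *	 | optional btopt freq/match/price tables | sequences
 *	 | llCode | mlCode | ofCode | literals]
 * which is why only tableSpace is memset on reset: the later regions are
 * rewritten as blocks are processed.
 */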
/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx)
{
	int i;
	for (i = 0; i < ZSTD_REP_NUM; i++)
		cctx->rep[i] = 0;
}
/*! ZSTD_copyCCtx() :
 * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 * @return : 0, or an error code */
size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize)
{
	if (srcCCtx->stage != ZSTDcs_init)
		return ERROR(stage_wrong);

	memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
	{
		ZSTD_parameters params = srcCCtx->params;
		params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
		ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
	}

	/* copy tables */
	{
		size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
		size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
		size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
		size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
		memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
	}

	/* copy dictionary offsets */
	dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
	dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3;
	dstCCtx->nextSrc = srcCCtx->nextSrc;
	dstCCtx->base = srcCCtx->base;
	dstCCtx->dictBase = srcCCtx->dictBase;
	dstCCtx->dictLimit = srcCCtx->dictLimit;
	dstCCtx->lowLimit = srcCCtx->lowLimit;
	dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd;
	dstCCtx->dictID = srcCCtx->dictID;

	/* copy entropy tables */
	dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
	dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
	if (srcCCtx->flagStaticTables) {
		memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
		memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
		memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
	}
	if (srcCCtx->flagStaticHufTable) {
		memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4);
	}

	return 0;
}
/*! ZSTD_reduceTable() :
 * reduce table indexes by `reducerValue` */
static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue)
{
	U32 u;
	for (u = 0; u < size; u++) {
		if (table[u] < reducerValue)
			table[u] = 0;
		else
			table[u] -= reducerValue;
	}
}
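/*
 * Commentary (not in the original), an example: with reducerValue =
 * 0x01000000, an entry 0x01000400 becomes 0x00000400 and any entry below
 * the reducer collapses to 0 (i.e. "no position"), keeping every surviving
 * index consistent with the rebased window.
 */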
/*! ZSTD_reduceIndex() :
 * rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue)
{
	{
		U32 const hSize = 1 << zc->params.cParams.hashLog;
		ZSTD_reduceTable(zc->hashTable, hSize, reducerValue);
	}

	{
		U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
		ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue);
	}

	{
		U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
		ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue);
	}
}
/*-*******************************************************
*  Block entropic compression
*********************************************************/
/* See doc/zstd_compression_format.md for detailed format description */

size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	if (srcSize + ZSTD_blockHeaderSize > dstCapacity)
		return ERROR(dstSize_tooSmall);
	memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize);
	ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
	return ZSTD_blockHeaderSize + srcSize;
}
static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	BYTE *const ostart = (BYTE * const)dst;
	U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);

	if (srcSize + flSize > dstCapacity)
		return ERROR(dstSize_tooSmall);

	switch (flSize) {
	case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break;
	case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break;
	default: /* note : should not be necessary : flSize is within {1,2,3} */
	case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break;
	}

	memcpy(ostart + flSize, src, srcSize);
	return srcSize + flSize;
}
static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	BYTE *const ostart = (BYTE * const)dst;
	U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);

	(void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */

	switch (flSize) {
	case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break;
	case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break;
	default: /* note : should not be necessary : flSize is necessarily within {1,2,3} */
	case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break;
	}

	ostart[flSize] = *(const BYTE *)src;
	return flSize + 1;
}
static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
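/*
 * Commentary (not in the original): entropy compression must beat the raw
 * encoding by srcSize/64 + 2 bytes to be kept; e.g. a 4 KB literal set must
 * save at least 4096/64 + 2 = 66 bytes, so marginal gains that would not
 * pay for their headers are rejected.
 */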
static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	size_t const minGain = ZSTD_minGain(srcSize);
	size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
	BYTE *const ostart = (BYTE *)dst;
	U32 singleStream = srcSize < 256;
	symbolEncodingType_e hType = set_compressed;
	size_t cLitSize;

/* small ? don't even attempt compression (speed opt) */
#define LITERAL_NOENTROPY 63
	{
		size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
		if (srcSize <= minLitSize)
			return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
	}

	if (dstCapacity < lhSize + 1)
		return ERROR(dstSize_tooSmall); /* not enough space for compression */
	{
		HUF_repeat repeat = zc->flagStaticHufTable;
		int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
		if (repeat == HUF_repeat_valid && lhSize == 3)
			singleStream = 1;
		cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
								 sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
					: HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
								sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
		if (repeat != HUF_repeat_none) {
			hType = set_repeat;
		} /* reused the existing table */
		else {
			zc->flagStaticHufTable = HUF_repeat_check;
		} /* now have a table to reuse */
	}

	if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) {
		zc->flagStaticHufTable = HUF_repeat_none;
		return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
	}
	if (cLitSize == 1) {
		zc->flagStaticHufTable = HUF_repeat_none;
		return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
	}

	/* Build header */
	switch (lhSize) {
	case 3: /* 2 - 2 - 10 - 10 */
	{
		U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14);
		ZSTD_writeLE24(ostart, lhc);
		break;
	}
	case 4: /* 2 - 2 - 14 - 14 */
	{
		U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18);
		ZSTD_writeLE32(ostart, lhc);
		break;
	}
	default: /* should not be necessary, lhSize is only {3,4,5} */
	case 5: /* 2 - 2 - 18 - 18 */
	{
		U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22);
		ZSTD_writeLE32(ostart, lhc);
		ostart[4] = (BYTE)(cLitSize >> 10);
		break;
	}
	}
	return lhSize + cLitSize;
}
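/*
 * Commentary (not in the original): the "2 - 2 - 10 - 10" style annotations
 * above describe the literals-section header bit layout: 2 bits of block
 * type, 2 bits of size format, then the regenerated size and compressed
 * size in the stated widths, written little-endian.
 */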
static const BYTE LL_Code[64] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18,
				 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
				 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24};

static const BYTE ML_Code[128] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
				  26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38,
				  38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
				  40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42,
				  42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};
void ZSTD_seqToCodes(const seqStore_t *seqStorePtr)
{
	BYTE const LL_deltaCode = 19;
	BYTE const ML_deltaCode = 36;
	const seqDef *const sequences = seqStorePtr->sequencesStart;
	BYTE *const llCodeTable = seqStorePtr->llCode;
	BYTE *const ofCodeTable = seqStorePtr->ofCode;
	BYTE *const mlCodeTable = seqStorePtr->mlCode;
	U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
	U32 u;
	for (u = 0; u < nbSeq; u++) {
		U32 const llv = sequences[u].litLength;
		U32 const mlv = sequences[u].matchLength;
		llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
		ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
		mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
	}
	if (seqStorePtr->longLengthID == 1)
		llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
	if (seqStorePtr->longLengthID == 2)
		mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity)
{
	const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
	const seqStore_t *seqStorePtr = &(zc->seqStore);
	FSE_CTable *CTable_LitLength = zc->litlengthCTable;
	FSE_CTable *CTable_OffsetBits = zc->offcodeCTable;
	FSE_CTable *CTable_MatchLength = zc->matchlengthCTable;
	U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
	const seqDef *const sequences = seqStorePtr->sequencesStart;
	const BYTE *const ofCodeTable = seqStorePtr->ofCode;
	const BYTE *const llCodeTable = seqStorePtr->llCode;
	const BYTE *const mlCodeTable = seqStorePtr->mlCode;
	BYTE *const ostart = (BYTE *)dst;
	BYTE *const oend = ostart + dstCapacity;
	BYTE *op = ostart;
	size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
	BYTE *seqHead;

	U32 *count;
	S16 *norm;
	U32 *workspace;
	size_t workspaceSize = sizeof(zc->tmpCounters);
	{
		size_t spaceUsed32 = 0;
		count = (U32 *)zc->tmpCounters + spaceUsed32;
		spaceUsed32 += MaxSeq + 1;
		norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32);
		spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;

		workspace = (U32 *)zc->tmpCounters + spaceUsed32;
		workspaceSize -= (spaceUsed32 << 2);
	}

	/* Compress literals */
	{
		const BYTE *const literals = seqStorePtr->litStart;
		size_t const litSize = seqStorePtr->lit - literals;
		size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
		if (ZSTD_isError(cSize))
			return cSize;
		op += cSize;
	}

	/* Sequences Header */
	if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */)
		return ERROR(dstSize_tooSmall);
	if (nbSeq < 0x7F)
		*op++ = (BYTE)nbSeq;
	else if (nbSeq < LONGNBSEQ)
		op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2;
	else
		op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3;
	if (nbSeq == 0)
		return op - ostart;

	/* seqHead : flags for FSE encoding type */
	seqHead = op++;

#define MIN_SEQ_FOR_DYNAMIC_FSE 64
#define MAX_SEQ_FOR_STATIC_FSE 1000

	/* convert length/distances into codes */
	ZSTD_seqToCodes(seqStorePtr);

	/* CTable for Literal Lengths */
	{
		U32 max = MaxLL;
		size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
			*op++ = llCodeTable[0];
			FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
			LLtype = set_rle;
		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
			LLtype = set_repeat;
		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) {
			FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize);
			LLtype = set_basic;
		} else {
			size_t nbSeq_1 = nbSeq;
			const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
			if (count[llCodeTable[nbSeq - 1]] > 1) {
				count[llCodeTable[nbSeq - 1]]--;
				nbSeq_1--;
			}
			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
			{
				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
				if (FSE_isError(NCountSize))
					return NCountSize;
				op += NCountSize;
			}
			FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize);
			LLtype = set_compressed;
		}
	}

	/* CTable for Offsets */
	{
		U32 max = MaxOff;
		size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
			*op++ = ofCodeTable[0];
			FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
			Offtype = set_rle;
		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
			Offtype = set_repeat;
		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) {
			FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize);
			Offtype = set_basic;
		} else {
			size_t nbSeq_1 = nbSeq;
			const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
			if (count[ofCodeTable[nbSeq - 1]] > 1) {
				count[ofCodeTable[nbSeq - 1]]--;
				nbSeq_1--;
			}
			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
			{
				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
				if (FSE_isError(NCountSize))
					return NCountSize;
				op += NCountSize;
			}
			FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize);
			Offtype = set_compressed;
		}
	}

	/* CTable for MatchLengths */
	{
		U32 max = MaxML;
		size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
			*op++ = *mlCodeTable;
			FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
			MLtype = set_rle;
		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
			MLtype = set_repeat;
		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) {
			FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize);
			MLtype = set_basic;
		} else {
			size_t nbSeq_1 = nbSeq;
			const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
			if (count[mlCodeTable[nbSeq - 1]] > 1) {
				count[mlCodeTable[nbSeq - 1]]--;
				nbSeq_1--;
			}
			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
			{
				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
				if (FSE_isError(NCountSize))
					return NCountSize;
				op += NCountSize;
			}
			FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize);
			MLtype = set_compressed;
		}
	}

	*seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
	zc->flagStaticTables = 0;

	/* Encoding Sequences */
	{
		BIT_CStream_t blockStream;
		FSE_CState_t stateMatchLength;
		FSE_CState_t stateOffsetBits;
		FSE_CState_t stateLitLength;

		CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */

		/* first symbols */
		FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
		FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
		FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
		BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
		if (ZSTD_32bits())
			BIT_flushBits(&blockStream);
		BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]);
		if (ZSTD_32bits())
			BIT_flushBits(&blockStream);
		if (longOffsets) {
			U32 const ofBits = ofCodeTable[nbSeq - 1];
			int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
			if (extraBits) {
				BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits);
				BIT_flushBits(&blockStream);
			}
			BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits);
		} else {
			BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]);
		}
		BIT_flushBits(&blockStream);

		{
			size_t n;
			for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */
				BYTE const llCode = llCodeTable[n];
				BYTE const ofCode = ofCodeTable[n];
				BYTE const mlCode = mlCodeTable[n];
				U32 const llBits = LL_bits[llCode];
				U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
				U32 const mlBits = ML_bits[mlCode];

				FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);  /* 15 */ /* 15 */
				FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
				if (ZSTD_32bits())
					BIT_flushBits(&blockStream);			  /* (7)*/
				FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
				if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
					BIT_flushBits(&blockStream); /* (7)*/
				BIT_addBits(&blockStream, sequences[n].litLength, llBits);
				if (ZSTD_32bits() && ((llBits + mlBits) > 24))
					BIT_flushBits(&blockStream);
				BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
				if (ZSTD_32bits())
					BIT_flushBits(&blockStream); /* (7)*/
				if (longOffsets) {
					int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
					if (extraBits) {
						BIT_addBits(&blockStream, sequences[n].offset, extraBits);
						BIT_flushBits(&blockStream); /* (7)*/
					}
					BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */
				} else {
					BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
				}
				BIT_flushBits(&blockStream); /* (7)*/
			}
		}

		FSE_flushCState(&blockStream, &stateMatchLength);
		FSE_flushCState(&blockStream, &stateOffsetBits);
		FSE_flushCState(&blockStream, &stateLitLength);

		{
			size_t const streamSize = BIT_closeCStream(&blockStream);
			if (streamSize == 0)
				return ERROR(dstSize_tooSmall); /* not enough space */
			op += streamSize;
		}
	}
	return op - ostart;
}
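/*
 * Commentary (not in the original): the numeric annotations in the encoding
 * loop above (15, 24, 33, 31, "(7)") appear to track worst-case bits
 * pending in the bit container on 32-bit / 64-bit targets, documenting why
 * a flush is only required at the marked points.
 */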
ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize)
{
	size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity);
	size_t const minGain = ZSTD_minGain(srcSize);
	size_t const maxCSize = srcSize - minGain;
	/* If the srcSize <= dstCapacity, then there is enough space to write a
	 * raw uncompressed block. Since we ran out of space, the block must not
	 * be compressible, so fall back to a raw uncompressed block.
	 */
	int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
	int i;

	if (ZSTD_isError(cSize) && !uncompressibleError)
		return cSize;
	if (cSize >= maxCSize || uncompressibleError) {
		zc->flagStaticHufTable = HUF_repeat_none;
		return 0; /* block not compressed */
	}

	/* confirm repcodes */
	for (i = 0; i < ZSTD_REP_NUM; i++)
		zc->rep[i] = zc->repToConfirm[i];
	return cSize;
}
/*! ZSTD_storeSeq() :
	Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
	`offsetCode` : distance to match, or 0 == repCode.
	`matchCode` : matchLength - MINMATCH
*/
ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode)
{
	/* copy Literals */
	ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
	seqStorePtr->lit += litLength;

	/* literal Length */
	if (litLength > 0xFFFF) {
		seqStorePtr->longLengthID = 1;
		seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
	}
	seqStorePtr->sequences[0].litLength = (U16)litLength;

	/* match offset */
	seqStorePtr->sequences[0].offset = offsetCode + 1;

	/* match Length */
	if (matchCode > 0xFFFF) {
		seqStorePtr->longLengthID = 2;
		seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
	}
	seqStorePtr->sequences[0].matchLength = (U16)matchCode;

	seqStorePtr->sequences++;
}
/*-*************************************
*  Match length counter
***************************************/
static unsigned ZSTD_NbCommonBytes(register size_t val)
{
	if (ZSTD_isLittleEndian()) {
		if (ZSTD_64bits()) {
			return (__builtin_ctzll((U64)val) >> 3);
		} else { /* 32 bits */
			return (__builtin_ctz((U32)val) >> 3);
		}
	} else { /* Big Endian CPU */
		if (ZSTD_64bits()) {
			return (__builtin_clzll(val) >> 3);
		} else { /* 32 bits */
			return (__builtin_clz((U32)val) >> 3);
		}
	}
}
static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
{
	const BYTE *const pStart = pIn;
	const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1);

	while (pIn < pInLoopLimit) {
		size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn);
		if (!diff) {
			pIn += sizeof(size_t);
			pMatch += sizeof(size_t);
			continue;
		}
		pIn += ZSTD_NbCommonBytes(diff);
		return (size_t)(pIn - pStart);
	}

	if (ZSTD_64bits())
		if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) {
			pIn += 4;
			pMatch += 4;
		}
	if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) {
		pIn += 2;
		pMatch += 2;
	}
	if ((pIn < pInLimit) && (*pMatch == *pIn))
		pIn++;
	return (size_t)(pIn - pStart);
}
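/*
 * Commentary (not in the original), a worked example: on a little-endian
 * 64-bit CPU, if two 8-byte words agree in their first five bytes, the XOR
 * has its lowest set bit somewhere in bits 40..47, so
 * __builtin_ctzll(diff) >> 3 == 5, the number of matching leading bytes.
 */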
/** ZSTD_count_2segments() :
 * can count match length with `ip` & `match` in 2 different segments.
 * convention : on reaching mEnd, match count continue starting from iStart
 */
static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
{
	const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd);
	size_t const matchLength = ZSTD_count(ip, match, vEnd);
	if (match + matchLength != mEnd)
		return matchLength;
	return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
}
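/*
 * Commentary (not in the original): this handles extDict matches that start
 * in the dictionary segment and run past its end (mEnd); counting resumes
 * at iStart, the start of the current prefix, which holds the bytes that
 * logically follow the dictionary.
 */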
/*-*************************************
*  Hashes
***************************************/
static const U32 prime3bytes = 506832829U;
static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); }
ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */

static const U32 prime4bytes = 2654435761U;
static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); }
static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); }

static const U64 prime5bytes = 889523592379ULL;
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); }
static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }

static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); }
static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }

static const U64 prime7bytes = 58295818150454627ULL;
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); }
static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }

static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); }
static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }

static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
{
	switch (mls) {
	// case 3: return ZSTD_hash3Ptr(p, hBits);
	default:
	case 4: return ZSTD_hash4Ptr(p, hBits);
	case 5: return ZSTD_hash5Ptr(p, hBits);
	case 6: return ZSTD_hash6Ptr(p, hBits);
	case 7: return ZSTD_hash7Ptr(p, hBits);
	case 8: return ZSTD_hash8Ptr(p, hBits);
	}
}
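/*
 * Commentary (not in the original): these are multiplicative (Fibonacci)
 * hashes: prime4bytes (2654435761) is a prime close to 2^32 / golden ratio,
 * the classic recipe; the pre-shift in the 3-byte variant (u << (32 - 24))
 * keeps only 24 input bits, and the final right shift selects the top h
 * bits as the table index.
 */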
/*-*************************************
*  Fast Scan
***************************************/
static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls)
{
	U32 *const hashTable = zc->hashTable;
	U32 const hBits = zc->params.cParams.hashLog;
	const BYTE *const base = zc->base;
	const BYTE *ip = base + zc->nextToUpdate;
	const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
	const size_t fastHashFillStep = 3;

	while (ip <= iend) {
		hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
		ip += fastHashFillStep;
	}
}
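/*
 * Commentary (not in the original): indexing only every third position
 * (fastHashFillStep = 3) looks like a deliberate speed/coverage trade-off
 * when loading dictionary content: the fast strategies tolerate missed
 * positions, which merely cost some potential matches.
 */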
void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
{
	U32 *const hashTable = cctx->hashTable;
	U32 const hBits = cctx->params.cParams.hashLog;
	seqStore_t *seqStorePtr = &(cctx->seqStore);
	const BYTE *const base = cctx->base;
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const U32 lowestIndex = cctx->dictLimit;
	const BYTE *const lowest = base + lowestIndex;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - HASH_READ_SIZE;
	U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
	U32 offsetSaved = 0;

	/* init */
	ip += (ip == lowest);
	{
		U32 const maxRep = (U32)(ip - lowest);
		if (offset_2 > maxRep)
			offsetSaved = offset_2, offset_2 = 0;
		if (offset_1 > maxRep)
			offsetSaved = offset_1, offset_1 = 0;
	}

	/* Main Search Loop */
	while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
		size_t mLength;
		size_t const h = ZSTD_hashPtr(ip, hBits, mls);
		U32 const curr = (U32)(ip - base);
		U32 const matchIndex = hashTable[h];
		const BYTE *match = base + matchIndex;
		hashTable[h] = curr; /* update hash table */

		if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
			mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
			ip++;
			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
		} else {
			U32 offset;
			if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
				ip += ((ip - anchor) >> g_searchStrength) + 1;
				continue;
			}
			mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
			offset = (U32)(ip - match);
			while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
				ip--;
				match--;
				mLength++;
			} /* catch up */
			offset_2 = offset_1;
			offset_1 = offset;

			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
		}

		/* match found */
		ip += mLength;
		anchor = ip;

		if (ip <= ilimit) {
			/* Fill Table */
			hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
			hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
			/* check immediate repcode */
			while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
				/* store sequence */
				size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
				{
					U32 const tmpOff = offset_2;
					offset_2 = offset_1;
					offset_1 = tmpOff;
				} /* swap offset_2 <=> offset_1 */
				hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
				ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
				ip += rLength;
				anchor = ip;
				continue; /* faster when present ... (?) */
			}
		}
	}

	/* save reps for next block */
	cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
	cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;

	/* Last Literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
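/*
 * Commentary (not in the original): the repcode test at ip+1 runs before
 * the hash probe because a repeat offset needs no table lookup and, on a
 * hit, yields a match at near-zero search cost; the
 * ((ip - anchor) >> g_searchStrength) + 1 step then accelerates through
 * incompressible data by skipping further the longer nothing has matched.
 */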
static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
	const U32 mls = ctx->params.cParams.searchLength;
	switch (mls) {
	default: /* includes case 3 */
	case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
	case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
	case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
	case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
	}
}
static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
{
	U32 *hashTable = ctx->hashTable;
	const U32 hBits = ctx->params.cParams.hashLog;
	seqStore_t *seqStorePtr = &(ctx->seqStore);
	const BYTE *const base = ctx->base;
	const BYTE *const dictBase = ctx->dictBase;
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const U32 lowestIndex = ctx->lowLimit;
	const BYTE *const dictStart = dictBase + lowestIndex;
	const U32 dictLimit = ctx->dictLimit;
	const BYTE *const lowPrefixPtr = base + dictLimit;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - 8;
	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];

	/* Search Loop */
	while (ip < ilimit) { /* < instead of <=, because (ip+1) */
		const size_t h = ZSTD_hashPtr(ip, hBits, mls);
		const U32 matchIndex = hashTable[h];
		const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
		const BYTE *match = matchBase + matchIndex;
		const U32 curr = (U32)(ip - base);
		const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
		const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
		const BYTE *repMatch = repBase + repIndex;
		size_t mLength;
		hashTable[h] = curr; /* update hash table */

		if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
		    (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
			const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
			mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
			ip++;
			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
		} else {
			if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
				ip += ((ip - anchor) >> g_searchStrength) + 1;
				continue;
			}
			{
				const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
				const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
				U32 offset;
				mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
				while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
					ip--;
					match--;
					mLength++;
				} /* catch up */
				offset = curr - matchIndex;
				offset_2 = offset_1;
				offset_1 = offset;
				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
			}
		}

		/* found a match : store it */
		ip += mLength;
		anchor = ip;

		if (ip <= ilimit) {
			/* Fill Table */
			hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
			hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
			/* check immediate repcode */
			while (ip <= ilimit) {
				U32 const curr2 = (U32)(ip - base);
				U32 const repIndex2 = curr2 - offset_2;
				const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
				if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
				    && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
					const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
					size_t const repLength2 =
					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
					U32 tmpOffset = offset_2;
					offset_2 = offset_1;
					offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
					hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
					ip += repLength2;
					anchor = ip;
					continue;
				}
				break;
			}
		}
	}

	/* save reps for next block */
	ctx->repToConfirm[0] = offset_1;
	ctx->repToConfirm[1] = offset_2;

	/* Last Literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
	U32 const mls = ctx->params.cParams.searchLength;
	switch (mls) {
	default: /* includes case 3 */
	case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
	case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
	case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
	case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
	}
}
/*-*************************************
*  Double Fast
***************************************/
static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls)
{
	U32 *const hashLarge = cctx->hashTable;
	U32 const hBitsL = cctx->params.cParams.hashLog;
	U32 *const hashSmall = cctx->chainTable;
	U32 const hBitsS = cctx->params.cParams.chainLog;
	const BYTE *const base = cctx->base;
	const BYTE *ip = base + cctx->nextToUpdate;
	const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
	const size_t fastHashFillStep = 3;

	while (ip <= iend) {
		hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
		hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
		ip += fastHashFillStep;
	}
}
void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
{
	U32 *const hashLong = cctx->hashTable;
	const U32 hBitsL = cctx->params.cParams.hashLog;
	U32 *const hashSmall = cctx->chainTable;
	const U32 hBitsS = cctx->params.cParams.chainLog;
	seqStore_t *seqStorePtr = &(cctx->seqStore);
	const BYTE *const base = cctx->base;
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const U32 lowestIndex = cctx->dictLimit;
	const BYTE *const lowest = base + lowestIndex;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - HASH_READ_SIZE;
	U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
	U32 offsetSaved = 0;

	/* init */
	ip += (ip == lowest);
	{
		U32 const maxRep = (U32)(ip - lowest);
		if (offset_2 > maxRep)
			offsetSaved = offset_2, offset_2 = 0;
		if (offset_1 > maxRep)
			offsetSaved = offset_1, offset_1 = 0;
	}

	/* Main Search Loop */
	while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
		size_t mLength;
		size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
		size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
		U32 const curr = (U32)(ip - base);
		U32 const matchIndexL = hashLong[h2];
		U32 const matchIndexS = hashSmall[h];
		const BYTE *matchLong = base + matchIndexL;
		const BYTE *match = base + matchIndexS;
		hashLong[h2] = hashSmall[h] = curr; /* update hash tables */

		if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
			mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
			ip++;
			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
		} else {
			U32 offset;
			if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
				mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8;
				offset = (U32)(ip - matchLong);
				while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) {
					ip--;
					matchLong--;
					mLength++;
				} /* catch up */
			} else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
				size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
				U32 const matchIndex3 = hashLong[h3];
				const BYTE *match3 = base + matchIndex3;
				hashLong[h3] = curr + 1;
				if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
					mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8;
					ip++;
					offset = (U32)(ip - match3);
					while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) {
						ip--;
						match3--;
						mLength++;
					} /* catch up */
				} else {
					mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
					offset = (U32)(ip - match);
					while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
						ip--;
						match--;
						mLength++;
					} /* catch up */
				}
			} else {
				ip += ((ip - anchor) >> g_searchStrength) + 1;
				continue;
			}

			offset_2 = offset_1;
			offset_1 = offset;

			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
		}

		/* match found */
		ip += mLength;
		anchor = ip;

		if (ip <= ilimit) {
			/* Fill Table */
			hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
			    curr + 2; /* here because curr+2 could be > iend-8 */
			hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);

			/* check immediate repcode */
			while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
				/* store sequence */
				size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
				{
					U32 const tmpOff = offset_2;
					offset_2 = offset_1;
					offset_1 = tmpOff;
				} /* swap offset_2 <=> offset_1 */
				hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
				hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
				ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
				ip += rLength;
				anchor = ip;
				continue; /* faster when present ... (?) */
			}
		}
	}

	/* save reps for next block */
	cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
	cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;

	/* Last Literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
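/*
 * Commentary (not in the original): double-fast keeps two tables: hashLong
 * indexes 8-byte fingerprints for high-confidence long matches, hashSmall
 * indexes mls-byte ones for coverage. After a short-table hit it still
 * retries the long table at ip+1, trading one extra probe for typically
 * longer matches.
 */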
static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
	const U32 mls = ctx->params.cParams.searchLength;
	switch (mls) {
	default: /* includes case 3 */
	case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
	case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
	case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
	case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
	}
}
static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
{
	U32 *const hashLong = ctx->hashTable;
	U32 const hBitsL = ctx->params.cParams.hashLog;
	U32 *const hashSmall = ctx->chainTable;
	U32 const hBitsS = ctx->params.cParams.chainLog;
	seqStore_t *seqStorePtr = &(ctx->seqStore);
	const BYTE *const base = ctx->base;
	const BYTE *const dictBase = ctx->dictBase;
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const U32 lowestIndex = ctx->lowLimit;
	const BYTE *const dictStart = dictBase + lowestIndex;
	const U32 dictLimit = ctx->dictLimit;
	const BYTE *const lowPrefixPtr = base + dictLimit;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - 8;
	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];

	/* Search Loop */
	while (ip < ilimit) { /* < instead of <=, because (ip+1) */
		const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
		const U32 matchIndex = hashSmall[hSmall];
		const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
		const BYTE *match = matchBase + matchIndex;

		const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
		const U32 matchLongIndex = hashLong[hLong];
		const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
		const BYTE *matchLong = matchLongBase + matchLongIndex;

		const U32 curr = (U32)(ip - base);
		const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
		const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
		const BYTE *repMatch = repBase + repIndex;
		size_t mLength;
		hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */

		if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
		    (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
			const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
			mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4;
			ip++;
			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
		} else {
			if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
				const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
				const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
				U32 offset;
				mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8;
				offset = curr - matchLongIndex;
				while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) {
					ip--;
					matchLong--;
					mLength++;
				} /* catch up */
				offset_2 = offset_1;
				offset_1 = offset;
				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
			} else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
				size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
				U32 const matchIndex3 = hashLong[h3];
				const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base;
				const BYTE *match3 = match3Base + matchIndex3;
				U32 offset;
				hashLong[h3] = curr + 1;
				if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
					const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
					const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
					mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8;
					ip++;
					offset = curr + 1 - matchIndex3;
					while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) {
						ip--;
						match3--;
						mLength++;
					} /* catch up */
				} else {
					const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
					const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
					mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4;
					offset = curr - matchIndex;
					while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
						ip--;
						match--;
						mLength++;
					} /* catch up */
				}
				offset_2 = offset_1;
				offset_1 = offset;
				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
			} else {
				ip += ((ip - anchor) >> g_searchStrength) + 1;
				continue;
			}
		}

		/* found a match : store it */
		ip += mLength;
		anchor = ip;

		if (ip <= ilimit) {
			/* Fill Table */
			hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
			hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
			hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
			hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base);
			/* check immediate repcode */
			while (ip <= ilimit) {
				U32 const curr2 = (U32)(ip - base);
				U32 const repIndex2 = curr2 - offset_2;
				const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
				if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
				    && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
					const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
					size_t const repLength2 =
					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
					U32 tmpOffset = offset_2;
					offset_2 = offset_1;
					offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
					hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
					hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
					ip += repLength2;
					anchor = ip;
					continue;
				}
				break;
			}
		}
	}

	/* save reps for next block */
	ctx->repToConfirm[0] = offset_1;
	ctx->repToConfirm[1] = offset_2;

	/* Last Literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
	U32 const mls = ctx->params.cParams.searchLength;
	switch (mls) {
	default: /* includes case 3 */
	case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
	case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
	case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
	case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
	}
}
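/* Note : dispatching on the run-time value of `mls` through a switch lets each
 * case call the generic function with a compile-time constant, so the compiler
 * can specialize the hash computation per match length ("template emulation",
 * the same pattern used by every *_selectMLS dispatcher below). */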
/*-*************************************
*  Binary Tree search
***************************************/
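/* The binary-tree strategies reuse the chain table as a tree : each position
 * owns two adjacent U32 cells, holding the indexes of its "smaller" and
 * "larger" children. A sketch of the addressing used throughout this section,
 * taken from the functions below :
 *
 *	U32 *const bt    = zc->chainTable;
 *	U32 const btLog  = zc->params.cParams.chainLog - 1;  // two cells per node
 *	U32 const btMask = (1 << btLog) - 1;
 *	U32 *smallerPtr  = bt + 2 * (curr & btMask);          // left child slot
 *	U32 *largerPtr   = smallerPtr + 1;                    // right child slot
 */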
/** ZSTD_insertBt1() : add one or multiple positions to tree.
*   ip : assumed <= iend-8 .
*   @return : nb of positions added */
static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict)
{
	U32 *const hashTable = zc->hashTable;
	U32 const hashLog = zc->params.cParams.hashLog;
	size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
	U32 *const bt = zc->chainTable;
	U32 const btLog = zc->params.cParams.chainLog - 1;
	U32 const btMask = (1 << btLog) - 1;
	U32 matchIndex = hashTable[h];
	size_t commonLengthSmaller = 0, commonLengthLarger = 0;
	const BYTE *const base = zc->base;
	const BYTE *const dictBase = zc->dictBase;
	const U32 dictLimit = zc->dictLimit;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const BYTE *const prefixStart = base + dictLimit;
	const BYTE *match;
	const U32 curr = (U32)(ip - base);
	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
	U32 *smallerPtr = bt + 2 * (curr & btMask);
	U32 *largerPtr = smallerPtr + 1;
	U32 dummy32; /* to be nullified at the end */
	U32 const windowLow = zc->lowLimit;
	U32 matchEndIdx = curr + 8;
	size_t bestLength = 8;

	hashTable[h] = curr; /* Update Hash Table */

	while (nbCompares-- && (matchIndex > windowLow)) {
		U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
		size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */

		if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
			match = base + matchIndex;
			if (match[matchLength] == ip[matchLength])
				matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
		} else {
			match = dictBase + matchIndex;
			matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
			if (matchIndex + matchLength >= dictLimit)
				match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
		}

		if (matchLength > bestLength) {
			bestLength = matchLength;
			if (matchLength > matchEndIdx - matchIndex)
				matchEndIdx = matchIndex + (U32)matchLength;
		}

		if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
			break;		      /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */

		if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
			/* match is smaller than curr */
			*smallerPtr = matchIndex;	   /* update smaller idx */
			commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
			if (matchIndex <= btLow) {
				smallerPtr = &dummy32;
				break;
			}			  /* beyond tree size, stop the search */
			smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
			matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
		} else {
			/* match is larger than curr */
			*largerPtr = matchIndex;
			commonLengthLarger = matchLength;
			if (matchIndex <= btLow) {
				largerPtr = &dummy32;
				break;
			} /* beyond tree size, stop the search */
			largerPtr = nextPtr;
			matchIndex = nextPtr[0];
		}
	}

	*smallerPtr = *largerPtr = 0;
	if (bestLength > 384)
		return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
	if (matchEndIdx > curr + 8)
		return matchEndIdx - curr - 8;
	return 1;
}
static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls,
					    U32 extDict)
{
	U32 *const hashTable = zc->hashTable;
	U32 const hashLog = zc->params.cParams.hashLog;
	size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
	U32 *const bt = zc->chainTable;
	U32 const btLog = zc->params.cParams.chainLog - 1;
	U32 const btMask = (1 << btLog) - 1;
	U32 matchIndex = hashTable[h];
	size_t commonLengthSmaller = 0, commonLengthLarger = 0;
	const BYTE *const base = zc->base;
	const BYTE *const dictBase = zc->dictBase;
	const U32 dictLimit = zc->dictLimit;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const BYTE *const prefixStart = base + dictLimit;
	const U32 curr = (U32)(ip - base);
	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
	const U32 windowLow = zc->lowLimit;
	U32 *smallerPtr = bt + 2 * (curr & btMask);
	U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
	U32 matchEndIdx = curr + 8;
	U32 dummy32; /* to be nullified at the end */
	size_t bestLength = 0;

	hashTable[h] = curr; /* Update Hash Table */

	while (nbCompares-- && (matchIndex > windowLow)) {
		U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
		size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
		const BYTE *match;

		if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
			match = base + matchIndex;
			if (match[matchLength] == ip[matchLength])
				matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
		} else {
			match = dictBase + matchIndex;
			matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
			if (matchIndex + matchLength >= dictLimit)
				match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
		}

		if (matchLength > bestLength) {
			if (matchLength > matchEndIdx - matchIndex)
				matchEndIdx = matchIndex + (U32)matchLength;
			if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
				bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
			if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
				break;		      /* drop, to guarantee consistency (miss a little bit of compression) */
		}

		if (match[matchLength] < ip[matchLength]) {
			/* match is smaller than curr */
			*smallerPtr = matchIndex;	   /* update smaller idx */
			commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
			if (matchIndex <= btLow) {
				smallerPtr = &dummy32;
				break;
			}			  /* beyond tree size, stop the search */
			smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
			matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
		} else {
			/* match is larger than curr */
			*largerPtr = matchIndex;
			commonLengthLarger = matchLength;
			if (matchIndex <= btLow) {
				largerPtr = &dummy32;
				break;
			} /* beyond tree size, stop the search */
			largerPtr = nextPtr;
			matchIndex = nextPtr[0];
		}
	}

	*smallerPtr = *largerPtr = 0;

	zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
	return bestLength;
}
static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
{
	const BYTE *const base = zc->base;
	const U32 target = (U32)(ip - base);
	U32 idx = zc->nextToUpdate;

	while (idx < target)
		idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0);
}

/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls)
{
	if (ip < zc->base + zc->nextToUpdate)
		return 0; /* skipped area */
	ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
	return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
}
static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
					     const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch)
{
	switch (matchLengthSearch) {
	default: /* includes case 3 */
	case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
	case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
	case 7:
	case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
	}
}
static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
{
	const BYTE *const base = zc->base;
	const U32 target = (U32)(ip - base);
	U32 idx = zc->nextToUpdate;

	while (idx < target)
		idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1);
}

/** Tree updater, providing best match */
static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
					   const U32 mls)
{
	if (ip < zc->base + zc->nextToUpdate)
		return 0; /* skipped area */
	ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
	return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
}
static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
						     const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
						     const U32 matchLengthSearch)
{
	switch (matchLengthSearch) {
	default: /* includes case 3 */
	case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
	case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
	case 7:
	case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
	}
}
/* *********************************
*  Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask]

/* Update chains up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls)
{
	U32 *const hashTable = zc->hashTable;
	const U32 hashLog = zc->params.cParams.hashLog;
	U32 *const chainTable = zc->chainTable;
	const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
	const BYTE *const base = zc->base;
	const U32 target = (U32)(ip - base);
	U32 idx = zc->nextToUpdate;

	while (idx < target) { /* catch up */
		size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);
		NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
		hashTable[h] = idx;
		idx++;
	}

	zc->nextToUpdate = target;
	return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}
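/* Note on NEXT_IN_CHAIN : the chain table is a circular buffer of 1<<chainLog
 * cells, so two positions that collide on (d & chainMask) simply overwrite one
 * another; walking a chain therefore stops either at minChain (index evicted
 * from the window) or after maxNbAttempts hops, as done in
 * ZSTD_HcFindBestMatch_generic() below. */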
/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE
size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */
				    const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls,
				    const U32 extDict)
{
	U32 *const chainTable = zc->chainTable;
	const U32 chainSize = (1 << zc->params.cParams.chainLog);
	const U32 chainMask = chainSize - 1;
	const BYTE *const base = zc->base;
	const BYTE *const dictBase = zc->dictBase;
	const U32 dictLimit = zc->dictLimit;
	const BYTE *const prefixStart = base + dictLimit;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const U32 lowLimit = zc->lowLimit;
	const U32 curr = (U32)(ip - base);
	const U32 minChain = curr > chainSize ? curr - chainSize : 0;
	int nbAttempts = maxNbAttempts;
	size_t ml = EQUAL_READ32 - 1;

	/* HC4 match finder */
	U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls);

	for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) {
		const BYTE *match;
		size_t currMl = 0;
		if ((!extDict) || matchIndex >= dictLimit) {
			match = base + matchIndex;
			if (match[ml] == ip[ml]) /* potentially better */
				currMl = ZSTD_count(ip, match, iLimit);
		} else {
			match = dictBase + matchIndex;
			if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
				currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
		}

		/* save best solution */
		if (currMl > ml) {
			ml = currMl;
			*offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
			if (ip + currMl == iLimit)
				break; /* best possible, and avoid read overflow*/
		}

		if (matchIndex <= minChain)
			break;
		matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
	}

	return ml;
}
FORCE_INLINE
size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
				      const U32 matchLengthSearch)
{
	switch (matchLengthSearch) {
	default: /* includes case 3 */
	case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
	case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
	case 7:
	case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
	}
}

FORCE_INLINE
size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
					      const U32 matchLengthSearch)
{
	switch (matchLengthSearch) {
	default: /* includes case 3 */
	case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
	case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
	case 7:
	case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
	}
}
/* *******************************
*  Common parser - lazy strategy
*********************************/
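/* ZSTD_compressBlock_lazy_generic() below is shared by the greedy, lazy,
 * lazy2 and btlazy2 strategies : `searchMethod` selects the match finder
 * (0 = hash chain, 1 = binary tree) and `depth` how many extra positions are
 * probed before committing a match (0 = greedy, 1 = lazy, 2 = lazy2), as the
 * thin wrappers after the function confirm. */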
FORCE_INLINE
void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
{
	seqStore_t *seqStorePtr = &(ctx->seqStore);
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - 8;
	const BYTE *const base = ctx->base + ctx->dictLimit;

	U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
	U32 const mls = ctx->params.cParams.searchLength;

	typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
	searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0;

	/* init */
	ip += (ip == base);
	ctx->nextToUpdate3 = ctx->nextToUpdate;
	{
		U32 const maxRep = (U32)(ip - base);
		if (offset_2 > maxRep)
			savedOffset = offset_2, offset_2 = 0;
		if (offset_1 > maxRep)
			savedOffset = offset_1, offset_1 = 0;
	}

	/* Match Loop */
	while (ip < ilimit) {
		size_t matchLength = 0;
		size_t offset = 0;
		const BYTE *start = ip + 1;

		/* check repCode */
		if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
			/* repcode : we take it */
			matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
			if (depth == 0)
				goto _storeSequence;
		}

		/* first search (depth 0) */
		{
			size_t offsetFound = 99999999;
			size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
			if (ml2 > matchLength)
				matchLength = ml2, start = ip, offset = offsetFound;
		}

		if (matchLength < EQUAL_READ32) {
			ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
			continue;
		}

		/* let's try to find a better solution */
		if (depth >= 1)
			while (ip < ilimit) {
				ip++;
				if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
					size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
					int const gain2 = (int)(mlRep * 3);
					int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
					if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
						matchLength = mlRep, offset = 0, start = ip;
				}
				{
					size_t offset2 = 99999999;
					size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
					int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
					int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
					if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
						matchLength = ml2, offset = offset2, start = ip;
						continue; /* search a better one */
					}
				}

				/* let's find an even better one */
				if ((depth == 2) && (ip < ilimit)) {
					ip++;
					if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
						size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
						int const gain2 = (int)(ml2 * 4);
						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
							matchLength = ml2, offset = 0, start = ip;
					}
					{
						size_t offset2 = 99999999;
						size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
						int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
							matchLength = ml2, offset = offset2, start = ip;
							continue;
						}
					}
				}
				break; /* nothing found : store previous solution */
			}

		/* NOTE:
		 * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
		 * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
		 * overflows the pointer, which is undefined behavior.
		 */
		/* catch up */
		if (offset) {
			while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) &&
			       (start[-1] == (start - offset + ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */
			{
				start--;
				matchLength++;
			}
			offset_2 = offset_1;
			offset_1 = (U32)(offset - ZSTD_REP_MOVE);
		}

		/* store sequence */
_storeSequence:
		{
			size_t const litLength = start - anchor;
			ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
			anchor = ip = start + matchLength;
		}

		/* check immediate repcode */
		while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
			/* store sequence */
			matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
			offset = offset_2;
			offset_2 = offset_1;
			offset_1 = (U32)offset; /* swap repcodes */
			ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
			ip += matchLength;
			anchor = ip;
			continue; /* faster when present ... (?) */
		}
	}

	/* Save reps for next block */
	ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
	ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;

	/* Last Literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
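/* Illustrative arithmetic for the gain1/gain2 comparisons above (example
 * values are not from the original source) : a candidate is scored as
 * 4*length minus the log2 cost of its offset, with a small bonus protecting
 * the incumbent. E.g. with a current 5-byte match at offset 16,
 * gain1 = 5*4 - highbit32(17) + 4 = 20, while a 6-byte candidate at offset
 * 1024 only scores gain2 = 6*4 - highbit32(1025) = 14, so the shorter but
 * much closer match is kept. */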
static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); }

static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); }

static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); }

static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); }
FORCE_INLINE
void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
{
	seqStore_t *seqStorePtr = &(ctx->seqStore);
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - 8;
	const BYTE *const base = ctx->base;
	const U32 dictLimit = ctx->dictLimit;
	const U32 lowestIndex = ctx->lowLimit;
	const BYTE *const prefixStart = base + dictLimit;
	const BYTE *const dictBase = ctx->dictBase;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const BYTE *const dictStart = dictBase + ctx->lowLimit;

	const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
	const U32 mls = ctx->params.cParams.searchLength;

	typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
	searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;

	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];

	/* init */
	ctx->nextToUpdate3 = ctx->nextToUpdate;
	ip += (ip == prefixStart);

	/* Match Loop */
	while (ip < ilimit) {
		size_t matchLength = 0;
		size_t offset = 0;
		const BYTE *start = ip + 1;
		U32 curr = (U32)(ip - base);

		/* check repCode */
		{
			const U32 repIndex = (U32)(curr + 1 - offset_1);
			const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
			const BYTE *const repMatch = repBase + repIndex;
			if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
				if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) {
					/* repcode detected we should take it */
					const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
					matchLength =
					    ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
					if (depth == 0)
						goto _storeSequence;
				}
		}

		/* first search (depth 0) */
		{
			size_t offsetFound = 99999999;
			size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
			if (ml2 > matchLength)
				matchLength = ml2, start = ip, offset = offsetFound;
		}

		if (matchLength < EQUAL_READ32) {
			ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
			continue;
		}

		/* let's try to find a better solution */
		if (depth >= 1)
			while (ip < ilimit) {
				ip++;
				curr++;
				/* check repCode */
				if (offset) {
					const U32 repIndex = (U32)(curr - offset_1);
					const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
					const BYTE *const repMatch = repBase + repIndex;
					if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
						if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
							/* repcode detected */
							const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
							size_t const repLength =
							    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) +
							    EQUAL_READ32;
							int const gain2 = (int)(repLength * 3);
							int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
							if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
								matchLength = repLength, offset = 0, start = ip;
						}
				}

				/* search match, depth 1 */
				{
					size_t offset2 = 99999999;
					size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
					int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
					int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
					if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
						matchLength = ml2, offset = offset2, start = ip;
						continue; /* search a better one */
					}
				}

				/* let's find an even better one */
				if ((depth == 2) && (ip < ilimit)) {
					ip++;
					curr++;
					/* check repCode */
					if (offset) {
						const U32 repIndex = (U32)(curr - offset_1);
						const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
						const BYTE *const repMatch = repBase + repIndex;
						if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
							if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
								/* repcode detected */
								const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
								size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend,
													repEnd, prefixStart) +
										   EQUAL_READ32;
								int gain2 = (int)(repLength * 4);
								int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
								if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
									matchLength = repLength, offset = 0, start = ip;
							}
					}

					/* search match, depth 2 */
					{
						size_t offset2 = 99999999;
						size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
						int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
							matchLength = ml2, offset = offset2, start = ip;
							continue;
						}
					}
				}
				break; /* nothing found : store previous solution */
			}

		/* catch up */
		if (offset) {
			U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE));
			const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
			const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
			while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) {
				start--;
				match--;
				matchLength++;
			} /* catch up */
			offset_2 = offset_1;
			offset_1 = (U32)(offset - ZSTD_REP_MOVE);
		}

		/* store sequence */
_storeSequence:
		{
			size_t const litLength = start - anchor;
			ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
			anchor = ip = start + matchLength;
		}

		/* check immediate repcode */
		while (ip <= ilimit) {
			const U32 repIndex = (U32)((ip - base) - offset_2);
			const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
			const BYTE *const repMatch = repBase + repIndex;
			if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
				if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
					/* repcode detected we should take it */
					const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
					matchLength =
					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
					offset = offset_2;
					offset_2 = offset_1;
					offset_1 = (U32)offset; /* swap offset history */
					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
					ip += matchLength;
					anchor = ip;
					continue; /* faster when present ... (?) */
				}
			break;
		}
	}

	/* Save reps for next block */
	ctx->repToConfirm[0] = offset_1;
	ctx->repToConfirm[1] = offset_2;

	/* Last Literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); }

static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
	ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
}

static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
	ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
}

static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
	ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
}
/* The optimal parser */
#include "zstd_opt.h"

static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
#ifdef ZSTD_OPT_H_91842398743
	ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
#else
	(void)ctx;
	(void)src;
	(void)srcSize;
	return;
#endif
}

static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
#ifdef ZSTD_OPT_H_91842398743
	ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
#else
	(void)ctx;
	(void)src;
	(void)srcSize;
	return;
#endif
}

static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
#ifdef ZSTD_OPT_H_91842398743
	ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
#else
	(void)ctx;
	(void)src;
	(void)srcSize;
	return;
#endif
}

static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
{
#ifdef ZSTD_OPT_H_91842398743
	ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
#else
	(void)ctx;
	(void)src;
	(void)srcSize;
	return;
#endif
}
typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize);

static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
{
	static const ZSTD_blockCompressor blockCompressor[2][8] = {
	    {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2,
	     ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2},
	    {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,
	     ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}};

	return blockCompressor[extDict][(U32)strat];
}
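/* Note : the table is indexed as [extDict][strategy], so each row must follow
 * the ZSTD_strategy enum order (ZSTD_fast .. ZSTD_btopt2) and every extDict
 * variant must sit in the same column as its prefix-only counterpart. */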
static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
	const BYTE *const base = zc->base;
	const BYTE *const istart = (const BYTE *)src;
	const U32 curr = (U32)(istart - base);
	if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1)
		return 0; /* don't even attempt compression below a certain srcSize */
	ZSTD_resetSeqStore(&(zc->seqStore));
	if (curr > zc->nextToUpdate + 384)
		zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
	blockCompressor(zc, src, srcSize);
	return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
}
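/* A return value of 0 from ZSTD_compressBlock_internal() means "not
 * compressed" (input too small, or the compressed form would not be smaller);
 * ZSTD_compress_generic() below then stores the block raw. */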
/*! ZSTD_compress_generic() :
*   Compress a chunk of data into one or multiple blocks.
*   All blocks will be terminated, all input will be consumed.
*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
*   Frame is supposed already started (header already produced)
*   @return : compressed size, or an error code
*/
static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk)
{
	size_t blockSize = cctx->blockSize;
	size_t remaining = srcSize;
	const BYTE *ip = (const BYTE *)src;
	BYTE *const ostart = (BYTE *)dst;
	BYTE *op = ostart;
	U32 const maxDist = 1 << cctx->params.cParams.windowLog;

	if (cctx->params.fParams.checksumFlag && srcSize)
		xxh64_update(&cctx->xxhState, src, srcSize);

	while (remaining) {
		U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
		size_t cSize;

		if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
			return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
		if (remaining < blockSize)
			blockSize = remaining;

		/* preemptive overflow correction */
		if (cctx->lowLimit > (3U << 29)) {
			U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
			U32 const curr = (U32)(ip - cctx->base);
			U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
			U32 const correction = curr - newCurr;
			ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
			ZSTD_reduceIndex(cctx, correction);
			cctx->base += correction;
			cctx->dictBase += correction;
			cctx->lowLimit -= correction;
			cctx->dictLimit -= correction;
			if (cctx->nextToUpdate < correction)
				cctx->nextToUpdate = 0;
			else
				cctx->nextToUpdate -= correction;
		}

		if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
			/* enforce maxDist */
			U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist;
			if (cctx->lowLimit < newLowLimit)
				cctx->lowLimit = newLowLimit;
			if (cctx->dictLimit < cctx->lowLimit)
				cctx->dictLimit = cctx->lowLimit;
		}

		cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize);
		if (ZSTD_isError(cSize))
			return cSize;

		if (cSize == 0) { /* block is not compressible */
			U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3);
			if (blockSize + ZSTD_blockHeaderSize > dstCapacity)
				return ERROR(dstSize_tooSmall);
			ZSTD_writeLE32(op, cBlockHeader24); /* no pb, 4th byte will be overwritten */
			memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
			cSize = ZSTD_blockHeaderSize + blockSize;
		} else {
			U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
			ZSTD_writeLE24(op, cBlockHeader24);
			cSize += ZSTD_blockHeaderSize;
		}

		remaining -= blockSize;
		dstCapacity -= cSize;
		ip += blockSize;
		op += cSize;
	}

	if (lastFrameChunk && (op > ostart))
		cctx->stage = ZSTDcs_ending;
	return op - ostart;
}
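/* Block header layout (3 bytes, little-endian), as assembled above :
 * bit 0 = lastBlock flag, bits 1-2 = block type (bt_raw or bt_compressed),
 * bits 3-23 = block size; hence cBlockHeader24 = lastBlock + (type << 1) +
 * (size << 3). */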
static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
{
	BYTE *const op = (BYTE *)dst;
	U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */
	U32 const checksumFlag = params.fParams.checksumFlag > 0;
	U32 const windowSize = 1U << params.cParams.windowLog;
	U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
	BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
	U32 const fcsCode =
	    params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
	BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
	size_t pos;

	if (dstCapacity < ZSTD_frameHeaderSize_max)
		return ERROR(dstSize_tooSmall);

	ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
	op[4] = frameHeaderDescriptionByte;
	pos = 5;
	if (!singleSegment)
		op[pos++] = windowLogByte;
	switch (dictIDSizeCode) {
	default: /* impossible */
	case 0: break;
	case 1:
		op[pos] = (BYTE)(dictID);
		pos++;
		break;
	case 2:
		ZSTD_writeLE16(op + pos, (U16)dictID);
		pos += 2;
		break;
	case 3:
		ZSTD_writeLE32(op + pos, dictID);
		pos += 4;
		break;
	}
	switch (fcsCode) {
	default: /* impossible */
	case 0:
		if (singleSegment)
			op[pos++] = (BYTE)(pledgedSrcSize);
		break;
	case 1:
		ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256));
		pos += 2;
		break;
	case 2:
		ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize));
		pos += 4;
		break;
	case 3:
		ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize));
		pos += 8;
		break;
	}
	return pos;
}
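/* Frame header descriptor byte (op[4]) layout, matching the expression above :
 * bits 0-1 = dictIDSizeCode, bit 2 = checksumFlag, bit 5 = singleSegment,
 * bits 6-7 = fcsCode (frame content size field width). The window descriptor
 * byte is omitted exactly when singleSegment is set. */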
static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk)
{
	const BYTE *const ip = (const BYTE *)src;
	size_t fhSize = 0;

	if (cctx->stage == ZSTDcs_created)
		return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */

	if (frame && (cctx->stage == ZSTDcs_init)) {
		fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
		if (ZSTD_isError(fhSize))
			return fhSize;
		dstCapacity -= fhSize;
		dst = (char *)dst + fhSize;
		cctx->stage = ZSTDcs_ongoing;
	}

	/* Check if blocks follow each other */
	if (src != cctx->nextSrc) {
		/* not contiguous */
		ptrdiff_t const delta = cctx->nextSrc - ip;
		cctx->lowLimit = cctx->dictLimit;
		cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
		cctx->dictBase = cctx->base;
		cctx->base -= delta;
		cctx->nextToUpdate = cctx->dictLimit;
		if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE)
			cctx->lowLimit = cctx->dictLimit; /* too small extDict */
	}

	/* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
	if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
		ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
		U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
		cctx->lowLimit = lowLimitMax;
	}

	cctx->nextSrc = ip + srcSize;

	{
		size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk)
					   : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize);
		if (ZSTD_isError(cSize))
			return cSize;
		return cSize + fhSize;
	}
}
size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
}

size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); }

size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
	if (srcSize > blockSizeMax)
		return ERROR(srcSize_wrong);
	return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
}
/*! ZSTD_loadDictionaryContent() :
*   @return : 0, or an error code
*/
static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize)
{
	const BYTE *const ip = (const BYTE *)src;
	const BYTE *const iend = ip + srcSize;

	/* input becomes curr prefix */
	zc->lowLimit = zc->dictLimit;
	zc->dictLimit = (U32)(zc->nextSrc - zc->base);
	zc->dictBase = zc->base;
	zc->base += ip - zc->nextSrc;
	zc->nextToUpdate = zc->dictLimit;
	zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);

	zc->nextSrc = iend;
	if (srcSize <= HASH_READ_SIZE)
		return 0;

	switch (zc->params.cParams.strategy) {
	case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break;

	case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break;

	case ZSTD_greedy:
	case ZSTD_lazy:
	case ZSTD_lazy2:
		if (srcSize >= HASH_READ_SIZE)
			ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength);
		break;

	case ZSTD_btlazy2:
	case ZSTD_btopt:
	case ZSTD_btopt2:
		if (srcSize >= HASH_READ_SIZE)
			ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
		break;

	default:
		return ERROR(GENERIC); /* strategy doesn't exist; impossible */
	}

	zc->nextToUpdate = (U32)(iend - zc->base);
	return 0;
}
/* Dictionaries that assign zero probability to symbols that show up cause problems
   when FSE encoding. Refuse dictionaries that assign zero probability to symbols
   that we may encounter during compression.
   NOTE: This behavior is not standard and could be improved in the future. */
static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
{
	U32 s;
	if (dictMaxSymbolValue < maxSymbolValue)
		return ERROR(dictionary_corrupted);
	for (s = 0; s <= maxSymbolValue; ++s) {
		if (normalizedCounter[s] == 0)
			return ERROR(dictionary_corrupted);
	}
	return 0;
}
/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 * @return : 0, or an error code
 *  assumptions : magic number supposed already checked
 *                dictSize supposed > 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
{
	const BYTE *dictPtr = (const BYTE *)dict;
	const BYTE *const dictEnd = dictPtr + dictSize;
	short offcodeNCount[MaxOff + 1];
	unsigned offcodeMaxValue = MaxOff;

	dictPtr += 4; /* skip magic number */
	cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr);
	dictPtr += 4;

	{
		size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters));
		if (HUF_isError(hufHeaderSize))
			return ERROR(dictionary_corrupted);
		dictPtr += hufHeaderSize;
	}

	{
		unsigned offcodeLog;
		size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
		if (FSE_isError(offcodeHeaderSize))
			return ERROR(dictionary_corrupted);
		if (offcodeLog > OffFSELog)
			return ERROR(dictionary_corrupted);
		/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
		CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
			dictionary_corrupted);
		dictPtr += offcodeHeaderSize;
	}

	{
		short matchlengthNCount[MaxML + 1];
		unsigned matchlengthMaxValue = MaxML, matchlengthLog;
		size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
		if (FSE_isError(matchlengthHeaderSize))
			return ERROR(dictionary_corrupted);
		if (matchlengthLog > MLFSELog)
			return ERROR(dictionary_corrupted);
		/* Every match length code must have non-zero probability */
		CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
		CHECK_E(
		    FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
		    dictionary_corrupted);
		dictPtr += matchlengthHeaderSize;
	}

	{
		short litlengthNCount[MaxLL + 1];
		unsigned litlengthMaxValue = MaxLL, litlengthLog;
		size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
		if (FSE_isError(litlengthHeaderSize))
			return ERROR(dictionary_corrupted);
		if (litlengthLog > LLFSELog)
			return ERROR(dictionary_corrupted);
		/* Every literal length code must have non-zero probability */
		CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
		CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
			dictionary_corrupted);
		dictPtr += litlengthHeaderSize;
	}

	if (dictPtr + 12 > dictEnd)
		return ERROR(dictionary_corrupted);
	cctx->rep[0] = ZSTD_readLE32(dictPtr + 0);
	cctx->rep[1] = ZSTD_readLE32(dictPtr + 4);
	cctx->rep[2] = ZSTD_readLE32(dictPtr + 8);
	dictPtr += 12;

	{
		size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
		U32 offcodeMax = MaxOff;
		if (dictContentSize <= ((U32)-1) - 128 KB) {
			U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
			offcodeMax = ZSTD_highbit32(maxOffset);		     /* Calculate minimum offset code required to represent maxOffset */
		}
		/* All offset values <= dictContentSize + 128 KB must be representable */
		CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
		/* All repCodes must be <= dictContentSize and != 0*/
		{
			U32 u;
			for (u = 0; u < 3; u++) {
				if (cctx->rep[u] == 0)
					return ERROR(dictionary_corrupted);
				if (cctx->rep[u] > dictContentSize)
					return ERROR(dictionary_corrupted);
			}
		}

		cctx->flagStaticTables = 1;
		cctx->flagStaticHufTable = HUF_repeat_valid;
		return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
	}
}
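/* Summary of the dictionary layout consumed above : 4-byte magic, 4-byte
 * dictID, Huffman literals table, FSE tables for offsets, match lengths and
 * literal lengths, 3 x 4-byte repcodes, and finally the raw content used to
 * prime the match finders via ZSTD_loadDictionaryContent(). */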
/** ZSTD_compress_insertDictionary() :
*   @return : 0, or an error code */
static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
{
	if ((dict == NULL) || (dictSize <= 8))
		return 0;

	/* dict as pure content */
	if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
		return ZSTD_loadDictionaryContent(cctx, dict, dictSize);

	/* dict as zstd dictionary */
	return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
}
/*! ZSTD_compressBegin_internal() :
*   @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize)
{
	ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
	CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
	return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
}

/*! ZSTD_compressBegin_advanced() :
*   @return : 0, or an error code */
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
	/* compression parameters verification and optimization */
	CHECK_F(ZSTD_checkCParams(params.cParams));
	return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
}

size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
{
	ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
	return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
}

size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }
/*! ZSTD_writeEpilogue() :
*   Ends a frame.
*   @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity)
{
	BYTE *const ostart = (BYTE *)dst;
	BYTE *op = ostart;
	size_t fhSize = 0;

	if (cctx->stage == ZSTDcs_created)
		return ERROR(stage_wrong); /* init missing */

	/* special case : empty frame */
	if (cctx->stage == ZSTDcs_init) {
		fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
		if (ZSTD_isError(fhSize))
			return fhSize;
		dstCapacity -= fhSize;
		op += fhSize;
		cctx->stage = ZSTDcs_ongoing;
	}

	if (cctx->stage != ZSTDcs_ending) {
		/* write one last empty block, make it the "last" block */
		U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0;
		if (dstCapacity < 4)
			return ERROR(dstSize_tooSmall);
		ZSTD_writeLE32(op, cBlockHeader24);
		op += ZSTD_blockHeaderSize;
		dstCapacity -= ZSTD_blockHeaderSize;
	}

	if (cctx->params.fParams.checksumFlag) {
		U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
		if (dstCapacity < 4)
			return ERROR(dstSize_tooSmall);
		ZSTD_writeLE32(op, checksum);
		op += 4;
	}

	cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
	return op - ostart;
}
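/* A frame therefore always ends with an empty raw block flagged "last"
 * (unless the final data block was already so flagged, i.e. stage reached
 * ZSTDcs_ending), optionally followed by the 4 low bytes of the xxhash64
 * checksum of the uncompressed content. */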
size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	size_t endResult;
	size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
	if (ZSTD_isError(cSize))
		return cSize;
	endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize);
	if (ZSTD_isError(endResult))
		return endResult;
	return cSize + endResult;
}
static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
				     ZSTD_parameters params)
{
	CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
	return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}

size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
			       ZSTD_parameters params)
{
	return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
}

size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params)
{
	return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
}
/* =====  Dictionary API  ===== */

struct ZSTD_CDict_s {
	void *dictBuffer;
	const void *dictContent;
	size_t dictContentSize;
	ZSTD_CCtx *refContext;
}; /* typedef'd to ZSTD_CDict within "zstd.h" */

size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); }

static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem)
{
	if (!customMem.customAlloc || !customMem.customFree)
		return NULL;

	{
		ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
		ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem);

		if (!cdict || !cctx) {
			ZSTD_free(cdict, customMem);
			ZSTD_freeCCtx(cctx);
			return NULL;
		}

		if ((byReference) || (!dictBuffer) || (!dictSize)) {
			cdict->dictBuffer = NULL;
			cdict->dictContent = dictBuffer;
		} else {
			void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
			if (!internalBuffer) {
				ZSTD_free(cctx, customMem);
				ZSTD_free(cdict, customMem);
				return NULL;
			}
			memcpy(internalBuffer, dictBuffer, dictSize);
			cdict->dictBuffer = internalBuffer;
			cdict->dictContent = internalBuffer;
		}

		{
			size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
			if (ZSTD_isError(errorCode)) {
				ZSTD_free(cdict->dictBuffer, customMem);
				ZSTD_free(cdict, customMem);
				ZSTD_freeCCtx(cctx);
				return NULL;
			}
		}

		cdict->refContext = cctx;
		cdict->dictContentSize = dictSize;
		return cdict;
	}
}

ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize)
{
	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
	return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem);
}

size_t ZSTD_freeCDict(ZSTD_CDict *cdict)
{
	if (cdict == NULL)
		return 0; /* support free on NULL */
	{
		ZSTD_customMem const cMem = cdict->refContext->customMem;
		ZSTD_freeCCtx(cdict->refContext);
		ZSTD_free(cdict->dictBuffer, cMem);
		ZSTD_free(cdict, cMem);
	}
	return 0;
}

static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); }
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize)
{
	if (cdict->dictContentSize)
		CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
	else {
		ZSTD_parameters params = cdict->refContext->params;
		params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
		CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
	}
	return 0;
}

/*! ZSTD_compress_usingCDict() :
*   Compression using a digested Dictionary.
*   Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
*   Note that compression level is decided during dictionary creation */
size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
{
	CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));

	if (cdict->refContext->params.fParams.contentSizeFlag == 1) {
		cctx->params.fParams.contentSizeFlag = 1;
		cctx->frameContentSize = srcSize;
	} else {
		cctx->params.fParams.contentSizeFlag = 0;
	}

	return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
/* ******************************************************************
*  Streaming
********************************************************************/

typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;

struct ZSTD_CStream_s {
	ZSTD_CCtx *cctx;
	ZSTD_CDict *cdictLocal;
	const ZSTD_CDict *cdict;
	char *inBuff;
	size_t inBuffSize;
	size_t inToCompress;
	size_t inBuffPos;
	size_t inBuffTarget;
	size_t blockSize;
	char *outBuff;
	size_t outBuffSize;
	size_t outBuffContentSize;
	size_t outBuffFlushedSize;
	ZSTD_cStreamStage stage;
	U32 checksum;
	U32 frameEnded;
	U64 pledgedSrcSize;
	U64 inputProcessed;
	ZSTD_parameters params;
	ZSTD_customMem customMem;
}; /* typedef'd to ZSTD_CStream within "zstd.h" */

size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams)
{
	size_t const inBuffSize = (size_t)1 << cParams.windowLog;
	size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize);
	size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;

	return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
}

ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem)
{
	ZSTD_CStream *zcs;

	if (!customMem.customAlloc || !customMem.customFree)
		return NULL;

	zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
	if (zcs == NULL)
		return NULL;
	memset(zcs, 0, sizeof(ZSTD_CStream));
	memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
	zcs->cctx = ZSTD_createCCtx_advanced(customMem);
	if (zcs->cctx == NULL) {
		ZSTD_freeCStream(zcs);
		return NULL;
	}
	return zcs;
}

size_t ZSTD_freeCStream(ZSTD_CStream *zcs)
{
	if (zcs == NULL)
		return 0; /* support free on NULL */
	{
		ZSTD_customMem const cMem = zcs->customMem;
		ZSTD_freeCCtx(zcs->cctx);
		zcs->cctx = NULL;
		ZSTD_freeCDict(zcs->cdictLocal);
		zcs->cdictLocal = NULL;
		ZSTD_free(zcs->inBuff, cMem);
		zcs->inBuff = NULL;
		ZSTD_free(zcs->outBuff, cMem);
		zcs->outBuff = NULL;
		ZSTD_free(zcs, cMem);
	}
	return 0;
}

/*======   Initialization   ======*/

size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; }
*zcs
, unsigned long long pledgedSrcSize
)
3028 if (zcs
->inBuffSize
== 0)
3029 return ERROR(stage_wrong
); /* zcs has not been init at least once => can't reset */
3032 CHECK_F(ZSTD_compressBegin_usingCDict(zcs
->cctx
, zcs
->cdict
, pledgedSrcSize
))
3034 CHECK_F(ZSTD_compressBegin_advanced(zcs
->cctx
, NULL
, 0, zcs
->params
, pledgedSrcSize
));
3036 zcs
->inToCompress
= 0;
3038 zcs
->inBuffTarget
= zcs
->blockSize
;
3039 zcs
->outBuffContentSize
= zcs
->outBuffFlushedSize
= 0;
3040 zcs
->stage
= zcss_load
;
3041 zcs
->frameEnded
= 0;
3042 zcs
->pledgedSrcSize
= pledgedSrcSize
;
3043 zcs
->inputProcessed
= 0;
3044 return 0; /* ready to go */
3047 size_t ZSTD_resetCStream(ZSTD_CStream
*zcs
, unsigned long long pledgedSrcSize
)
3050 zcs
->params
.fParams
.contentSizeFlag
= (pledgedSrcSize
> 0);
3052 return ZSTD_resetCStream_internal(zcs
, pledgedSrcSize
);
static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
	/* allocate buffers */
	{
		size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
		if (zcs->inBuffSize < neededInBuffSize) {
			zcs->inBuffSize = neededInBuffSize;
			ZSTD_free(zcs->inBuff, zcs->customMem);
			zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem);
			if (zcs->inBuff == NULL)
				return ERROR(memory_allocation);
		}
		zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
	}
	if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) {
		zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1;
		ZSTD_free(zcs->outBuff, zcs->customMem);
		zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
		if (zcs->outBuff == NULL)
			return ERROR(memory_allocation);
	}

	if (dict && dictSize >= 8) {
		ZSTD_freeCDict(zcs->cdictLocal);
		zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
		if (zcs->cdictLocal == NULL)
			return ERROR(memory_allocation);
		zcs->cdict = zcs->cdictLocal;
	} else {
		zcs->cdict = NULL;
	}

	zcs->checksum = params.fParams.checksumFlag > 0;
	zcs->params = params;

	return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
}
ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
{
	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
	ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem);

	if (zcs) {
		size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);

		if (ZSTD_isError(code)) {
			return NULL;
		}
	}
	return zcs;
}
ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
{
	ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
	ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize);

	if (zcs) {
		zcs->cdict = cdict;
		if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) {
			return NULL;
		}
	}
	return zcs;
}
/*====== Compression ======*/

typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;

ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	size_t const length = MIN(dstCapacity, srcSize);

	memcpy(dst, src, length);
	return length;
}
static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush)
{
	U32 someMoreWork = 1;
	const char *const istart = (const char *)src;
	const char *const iend = istart + *srcSizePtr;
	const char *ip = istart;
	char *const ostart = (char *)dst;
	char *const oend = ostart + *dstCapacityPtr;
	char *op = ostart;

	while (someMoreWork) {
		switch (zcs->stage) {
		case zcss_init:
			return ERROR(init_missing); /* call ZBUFF_compressInit() first ! */

		case zcss_load:
			/* complete inBuffer */
			{
				size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
				size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip);

				zcs->inBuffPos += loaded;
				ip += loaded;
				if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) {
					someMoreWork = 0;
					break; /* not enough input to get a full block : stop there, wait for more */
				}
			}
			/* compress current block (note : this stage cannot be stopped in the middle) */
			{
				void *cDst;
				size_t cSize;
				size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
				size_t oSize = oend - op;

				if (oSize >= ZSTD_compressBound(iSize))
					cDst = op; /* compress directly into output buffer (avoid flush stage) */
				else
					cDst = zcs->outBuff, oSize = zcs->outBuffSize;
				cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize)
							   : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
				if (ZSTD_isError(cSize))
					return cSize;
				if (flush == zsf_end)
					zcs->frameEnded = 1;
				/* prepare next block */
				zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
				if (zcs->inBuffTarget > zcs->inBuffSize)
					zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */
				zcs->inToCompress = zcs->inBuffPos;
				if (cDst == op) { /* no need to flush */
					op += cSize;
					break;
				}
				zcs->outBuffContentSize = cSize;
				zcs->outBuffFlushedSize = 0;
				zcs->stage = zcss_flush; /* pass-through to flush stage */
			}
			/* fall through */

		case zcss_flush: {
			size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
			size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);

			op += flushed;
			zcs->outBuffFlushedSize += flushed;
			if (toFlush != flushed) {
				someMoreWork = 0;
				break; /* dst too small to store flushed data : stop there */
			}
			zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
			zcs->stage = zcss_load;
			break;
		}

		case zcss_final:
			someMoreWork = 0; /* do nothing */
			break;

		default:
			return ERROR(GENERIC); /* impossible */
		}
	}

	*srcSizePtr = ip - istart;
	*dstCapacityPtr = op - ostart;
	zcs->inputProcessed += *srcSizePtr;
	if (zcs->frameEnded)
		return 0;
	{
		size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;

		if (hintInSize == 0)
			hintInSize = zcs->blockSize;
		return hintInSize;
	}
}
size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
{
	size_t sizeRead = input->size - input->pos;
	size_t sizeWritten = output->size - output->pos;
	size_t const result =
	    ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather);

	input->pos += sizeRead;
	output->pos += sizeWritten;
	return result;
}
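
/*
 * Editor's note: illustrative sketch, not part of the original file. It
 * feeds one input buffer through ZSTD_compressStream(), which consumes
 * input and produces output as buffer space allows. The function name is
 * hypothetical; the caller owns both buffers and must drain out->dst
 * (resetting out->pos) whenever it fills up, then call again with the
 * same ZSTD_inBuffer to resume from in->pos.
 */
#if 0 /* example only, compiled out */
static int example_stream_chunk(ZSTD_CStream *zcs, ZSTD_outBuffer *out, ZSTD_inBuffer *in)
{
	while (in->pos < in->size) {
		size_t const hint = ZSTD_compressStream(zcs, out, in);

		if (ZSTD_isError(hint))
			return -EIO;
		if (out->pos == out->size)
			return -EAGAIN; /* drain out->dst, reset out->pos, call again */
	}
	return 0;
}
#endif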
/*====== Finalize ======*/

/*! ZSTD_flushStream() :
 *  @return : amount of data remaining to flush */
size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
{
	size_t srcSize = 0;
	size_t sizeWritten = output->size - output->pos;
	size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize,
							  &srcSize, /* use a valid src address instead of NULL */
							  zsf_flush);

	output->pos += sizeWritten;
	if (ZSTD_isError(result))
		return result;
	return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
}
size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
{
	BYTE *const ostart = (BYTE *)(output->dst) + output->pos;
	BYTE *const oend = (BYTE *)(output->dst) + output->size;
	BYTE *op = ostart;

	if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
		return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */

	if (zcs->stage != zcss_final) {
		/* flush whatever remains */
		size_t srcSize = 0;
		size_t sizeWritten = output->size - output->pos;
		size_t const notEnded =
		    ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
		size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;

		op += sizeWritten;
		if (remainingToFlush) {
			output->pos += sizeWritten;
			return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
		}
		/* create epilogue */
		zcs->stage = zcss_final;
		zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL,
									   0); /* write epilogue, including final empty block, into outBuff */
	}

	/* flush epilogue */
	{
		size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
		size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);

		op += flushed;
		zcs->outBuffFlushedSize += flushed;
		output->pos += op - ostart;
		if (toFlush == flushed)
			zcs->stage = zcss_init; /* end reached */
		return toFlush - flushed;
	}
}
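
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * ZSTD_endStream() returns the number of bytes still to be flushed, so a
 * caller finishes a frame by draining its output buffer and retrying until
 * the function returns 0. The name is hypothetical.
 */
#if 0 /* example only, compiled out */
static int example_finish_frame(ZSTD_CStream *zcs, ZSTD_outBuffer *out)
{
	size_t const remaining = ZSTD_endStream(zcs, out);

	if (ZSTD_isError(remaining))
		return -EIO;
	return remaining ? -EAGAIN : 0; /* -EAGAIN: drain out->dst, reset out->pos, call again */
}
#endif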
/*-=====  Pre-defined compression levels  =====-*/

#define ZSTD_DEFAULT_CLEVEL 1
#define ZSTD_MAX_CLEVEL 22
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = {
    {
	/* "default" */
	/* W,  C,  H,  S,  L, TL, strat */
	{18, 12, 12, 1, 7, 16, ZSTD_fast},    /* level  0 - never used */
	{19, 13, 14, 1, 7, 16, ZSTD_fast},    /* level  1 */
	{19, 15, 16, 1, 6, 16, ZSTD_fast},    /* level  2 */
	{20, 16, 17, 1, 5, 16, ZSTD_dfast},   /* level  3.*/
	{20, 18, 18, 1, 5, 16, ZSTD_dfast},   /* level  4.*/
	{20, 15, 18, 3, 5, 16, ZSTD_greedy},  /* level  5 */
	{21, 16, 19, 2, 5, 16, ZSTD_lazy},    /* level  6 */
	{21, 17, 20, 3, 5, 16, ZSTD_lazy},    /* level  7 */
	{21, 18, 20, 3, 5, 16, ZSTD_lazy2},   /* level  8 */
	{21, 20, 20, 3, 5, 16, ZSTD_lazy2},   /* level  9 */
	{21, 19, 21, 4, 5, 16, ZSTD_lazy2},   /* level 10 */
	{22, 20, 22, 4, 5, 16, ZSTD_lazy2},   /* level 11 */
	{22, 20, 22, 5, 5, 16, ZSTD_lazy2},   /* level 12 */
	{22, 21, 22, 5, 5, 16, ZSTD_lazy2},   /* level 13 */
	{22, 21, 22, 6, 5, 16, ZSTD_lazy2},   /* level 14 */
	{22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */
	{23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */
	{23, 21, 22, 4, 5, 24, ZSTD_btopt},   /* level 17 */
	{23, 23, 22, 6, 5, 32, ZSTD_btopt},   /* level 18 */
	{23, 23, 22, 6, 3, 48, ZSTD_btopt},   /* level 19 */
	{25, 25, 23, 7, 3, 64, ZSTD_btopt2},  /* level 20 */
	{26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */
	{27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */
    },
    {
	/* for srcSize <= 256 KB */
	/* W,  C,  H,  S,  L,  T, strat */
	{0, 0, 0, 0, 0, 0, ZSTD_fast},	       /* level  0 - not used */
	{18, 13, 14, 1, 6, 8, ZSTD_fast},      /* level  1 */
	{18, 14, 13, 1, 5, 8, ZSTD_dfast},     /* level  2 */
	{18, 16, 15, 1, 5, 8, ZSTD_dfast},     /* level  3 */
	{18, 15, 17, 1, 5, 8, ZSTD_greedy},    /* level  4.*/
	{18, 16, 17, 4, 5, 8, ZSTD_greedy},    /* level  5.*/
	{18, 16, 17, 3, 5, 8, ZSTD_lazy},      /* level  6.*/
	{18, 17, 17, 4, 4, 8, ZSTD_lazy},      /* level  7 */
	{18, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
	{18, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
	{18, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
	{18, 18, 17, 6, 4, 8, ZSTD_lazy2},     /* level 11.*/
	{18, 18, 17, 7, 4, 8, ZSTD_lazy2},     /* level 12.*/
	{18, 19, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13 */
	{18, 18, 18, 4, 4, 16, ZSTD_btopt},    /* level 14.*/
	{18, 18, 18, 4, 3, 16, ZSTD_btopt},    /* level 15.*/
	{18, 19, 18, 6, 3, 32, ZSTD_btopt},    /* level 16.*/
	{18, 19, 18, 8, 3, 64, ZSTD_btopt},    /* level 17.*/
	{18, 19, 18, 9, 3, 128, ZSTD_btopt},   /* level 18.*/
	{18, 19, 18, 10, 3, 256, ZSTD_btopt},  /* level 19.*/
	{18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/
	{18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/
	{18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/
    },
    {
	/* for srcSize <= 128 KB */
	/* W,  C,  H,  S,  L,  T, strat */
	{17, 12, 12, 1, 7, 8, ZSTD_fast},      /* level  0 - not used */
	{17, 12, 13, 1, 6, 8, ZSTD_fast},      /* level  1 */
	{17, 13, 16, 1, 5, 8, ZSTD_fast},      /* level  2 */
	{17, 16, 16, 2, 5, 8, ZSTD_dfast},     /* level  3 */
	{17, 13, 15, 3, 4, 8, ZSTD_greedy},    /* level  4 */
	{17, 15, 17, 4, 4, 8, ZSTD_greedy},    /* level  5 */
	{17, 16, 17, 3, 4, 8, ZSTD_lazy},      /* level  6 */
	{17, 15, 17, 4, 4, 8, ZSTD_lazy2},     /* level  7 */
	{17, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
	{17, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
	{17, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
	{17, 17, 17, 7, 4, 8, ZSTD_lazy2},     /* level 11 */
	{17, 17, 17, 8, 4, 8, ZSTD_lazy2},     /* level 12 */
	{17, 18, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13.*/
	{17, 17, 17, 7, 3, 8, ZSTD_btopt},     /* level 14.*/
	{17, 17, 17, 7, 3, 16, ZSTD_btopt},    /* level 15.*/
	{17, 18, 17, 7, 3, 32, ZSTD_btopt},    /* level 16.*/
	{17, 18, 17, 7, 3, 64, ZSTD_btopt},    /* level 17.*/
	{17, 18, 17, 7, 3, 256, ZSTD_btopt},   /* level 18.*/
	{17, 18, 17, 8, 3, 256, ZSTD_btopt},   /* level 19.*/
	{17, 18, 17, 9, 3, 256, ZSTD_btopt2},  /* level 20.*/
	{17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/
	{17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/
    },
    {
	/* for srcSize <= 16 KB */
	/* W,  C,  H,  S,  L,  T, strat */
	{14, 12, 12, 1, 7, 6, ZSTD_fast},      /* level  0 - not used */
	{14, 14, 14, 1, 6, 6, ZSTD_fast},      /* level  1 */
	{14, 14, 14, 1, 4, 6, ZSTD_fast},      /* level  2 */
	{14, 14, 14, 1, 4, 6, ZSTD_dfast},     /* level  3.*/
	{14, 14, 14, 4, 4, 6, ZSTD_greedy},    /* level  4.*/
	{14, 14, 14, 3, 4, 6, ZSTD_lazy},      /* level  5.*/
	{14, 14, 14, 4, 4, 6, ZSTD_lazy2},     /* level  6 */
	{14, 14, 14, 5, 4, 6, ZSTD_lazy2},     /* level  7 */
	{14, 14, 14, 6, 4, 6, ZSTD_lazy2},     /* level  8.*/
	{14, 15, 14, 6, 4, 6, ZSTD_btlazy2},   /* level  9.*/
	{14, 15, 14, 3, 3, 6, ZSTD_btopt},     /* level 10.*/
	{14, 15, 14, 6, 3, 8, ZSTD_btopt},     /* level 11.*/
	{14, 15, 14, 6, 3, 16, ZSTD_btopt},    /* level 12.*/
	{14, 15, 14, 6, 3, 24, ZSTD_btopt},    /* level 13.*/
	{14, 15, 15, 6, 3, 48, ZSTD_btopt},    /* level 14.*/
	{14, 15, 15, 6, 3, 64, ZSTD_btopt},    /* level 15.*/
	{14, 15, 15, 6, 3, 96, ZSTD_btopt},    /* level 16.*/
	{14, 15, 15, 6, 3, 128, ZSTD_btopt},   /* level 17.*/
	{14, 15, 15, 6, 3, 256, ZSTD_btopt},   /* level 18.*/
	{14, 15, 15, 7, 3, 256, ZSTD_btopt},   /* level 19.*/
	{14, 15, 15, 8, 3, 256, ZSTD_btopt2},  /* level 20.*/
	{14, 15, 15, 9, 3, 256, ZSTD_btopt2},  /* level 21.*/
	{14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/
    },
};
/*! ZSTD_getCParams() :
 *  @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
 *  Size values are optional, provide 0 if not known or unused */
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
{
	ZSTD_compressionParameters cp;
	size_t const addedSize = srcSize ? 0 : 500;
	U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1;
	U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */

	if (compressionLevel <= 0)
		compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */
	if (compressionLevel > ZSTD_MAX_CLEVEL)
		compressionLevel = ZSTD_MAX_CLEVEL;
	cp = ZSTD_defaultCParameters[tableID][compressionLevel];
	if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */
		if (cp.windowLog > ZSTD_WINDOWLOG_MAX)
			cp.windowLog = ZSTD_WINDOWLOG_MAX;
		if (cp.chainLog > ZSTD_CHAINLOG_MAX)
			cp.chainLog = ZSTD_CHAINLOG_MAX;
		if (cp.hashLog > ZSTD_HASHLOG_MAX)
			cp.hashLog = ZSTD_HASHLOG_MAX;
	}
	cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
	return cp;
}
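
/*
 * Editor's note: illustrative sketch, not part of the original file. For a
 * 100 KB input, rSize lands in the "srcSize <= 128 KB" band, so tableID
 * selects the third parameter table above before ZSTD_adjustCParams() is
 * applied. The function name is hypothetical.
 */
#if 0 /* example only, compiled out */
static ZSTD_compressionParameters example_params_for_100kb(void)
{
	/* level 3 row of the srcSize <= 128 KB table */
	return ZSTD_getCParams(3, 100 * 1024, 0);
}
#endif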
/*! ZSTD_getParams() :
 *  same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
 *  All fields of `ZSTD_frameParameters` are set to default (0) */
ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
{
	ZSTD_parameters params;
	ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);

	memset(&params, 0, sizeof(params));
	params.cParams = cParams;
	return params;
}
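
/*
 * Editor's note: illustrative sketch, not part of the original file. It
 * combines ZSTD_getParams() with the workspace-bound helper to set up a
 * stream the way the exported API expects. The function name is
 * hypothetical and vmalloc() (<linux/vmalloc.h>) is one possible allocator;
 * the caller vfree()s the workspace once the stream is finished.
 */
#if 0 /* example only, compiled out */
static ZSTD_CStream *example_create_cstream(int level, unsigned long long srcSizeHint, void **wkspOut)
{
	ZSTD_parameters const params = ZSTD_getParams(level, srcSizeHint, 0);
	size_t const wkspSize = ZSTD_CStreamWorkspaceBound(params.cParams);
	void *const wksp = vmalloc(wkspSize);

	if (!wksp)
		return NULL;
	*wkspOut = wksp;
	return ZSTD_initCStream(params, srcSizeHint, wksp, wkspSize);
}
#endif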
EXPORT_SYMBOL(ZSTD_maxCLevel);
EXPORT_SYMBOL(ZSTD_compressBound);

EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
EXPORT_SYMBOL(ZSTD_initCCtx);
EXPORT_SYMBOL(ZSTD_compressCCtx);
EXPORT_SYMBOL(ZSTD_compress_usingDict);

EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
EXPORT_SYMBOL(ZSTD_initCDict);
EXPORT_SYMBOL(ZSTD_compress_usingCDict);

EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
EXPORT_SYMBOL(ZSTD_initCStream);
EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
EXPORT_SYMBOL(ZSTD_resetCStream);
EXPORT_SYMBOL(ZSTD_compressStream);
EXPORT_SYMBOL(ZSTD_flushStream);
EXPORT_SYMBOL(ZSTD_endStream);
EXPORT_SYMBOL(ZSTD_CStreamInSize);
EXPORT_SYMBOL(ZSTD_CStreamOutSize);

EXPORT_SYMBOL(ZSTD_getCParams);
EXPORT_SYMBOL(ZSTD_getParams);
EXPORT_SYMBOL(ZSTD_checkCParams);
EXPORT_SYMBOL(ZSTD_adjustCParams);

EXPORT_SYMBOL(ZSTD_compressBegin);
EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
EXPORT_SYMBOL(ZSTD_copyCCtx);
EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
EXPORT_SYMBOL(ZSTD_compressContinue);
EXPORT_SYMBOL(ZSTD_compressEnd);

EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
EXPORT_SYMBOL(ZSTD_compressBlock);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Zstd Compressor");