/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */
#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"
15 void ZSTD_fillDoubleHashTable(ZSTD_matchState_t
* ms
,
16 void const* end
, ZSTD_dictTableLoadMethod_e dtlm
)
18 const ZSTD_compressionParameters
* const cParams
= &ms
->cParams
;
19 U32
* const hashLarge
= ms
->hashTable
;
20 U32
const hBitsL
= cParams
->hashLog
;
21 U32
const mls
= cParams
->minMatch
;
22 U32
* const hashSmall
= ms
->chainTable
;
23 U32
const hBitsS
= cParams
->chainLog
;
24 const BYTE
* const base
= ms
->window
.base
;
25 const BYTE
* ip
= base
+ ms
->nextToUpdate
;
26 const BYTE
* const iend
= ((const BYTE
*)end
) - HASH_READ_SIZE
;
27 const U32 fastHashFillStep
= 3;
29 /* Always insert every fastHashFillStep position into the hash tables.
30 * Insert the other positions into the large hash table if their entry
33 for (; ip
+ fastHashFillStep
- 1 <= iend
; ip
+= fastHashFillStep
) {
34 U32
const current
= (U32
)(ip
- base
);
36 for (i
= 0; i
< fastHashFillStep
; ++i
) {
37 size_t const smHash
= ZSTD_hashPtr(ip
+ i
, hBitsS
, mls
);
38 size_t const lgHash
= ZSTD_hashPtr(ip
+ i
, hBitsL
, 8);
40 hashSmall
[smHash
] = current
+ i
;
41 if (i
== 0 || hashLarge
[lgHash
] == 0)
42 hashLarge
[lgHash
] = current
+ i
;
43 /* Only load extra positions for ZSTD_dtlm_full */
44 if (dtlm
== ZSTD_dtlm_fast
)
51 size_t ZSTD_compressBlock_doubleFast_generic(
52 ZSTD_matchState_t
* ms
, seqStore_t
* seqStore
, U32 rep
[ZSTD_REP_NUM
],
53 void const* src
, size_t srcSize
,
54 U32
const mls
/* template */, ZSTD_dictMode_e
const dictMode
)
56 ZSTD_compressionParameters
const* cParams
= &ms
->cParams
;
57 U32
* const hashLong
= ms
->hashTable
;
58 const U32 hBitsL
= cParams
->hashLog
;
59 U32
* const hashSmall
= ms
->chainTable
;
60 const U32 hBitsS
= cParams
->chainLog
;
61 const BYTE
* const base
= ms
->window
.base
;
62 const BYTE
* const istart
= (const BYTE
*)src
;
63 const BYTE
* ip
= istart
;
64 const BYTE
* anchor
= istart
;
65 const U32 endIndex
= (U32
)((size_t)(istart
- base
) + srcSize
);
66 /* presumes that, if there is a dictionary, it must be using Attach mode */
67 const U32 prefixLowestIndex
= ZSTD_getLowestPrefixIndex(ms
, endIndex
, cParams
->windowLog
);
68 const BYTE
* const prefixLowest
= base
+ prefixLowestIndex
;
69 const BYTE
* const iend
= istart
+ srcSize
;
70 const BYTE
* const ilimit
= iend
- HASH_READ_SIZE
;
71 U32 offset_1
=rep
[0], offset_2
=rep
[1];
74 const ZSTD_matchState_t
* const dms
= ms
->dictMatchState
;
75 const ZSTD_compressionParameters
* const dictCParams
=
76 dictMode
== ZSTD_dictMatchState
?
78 const U32
* const dictHashLong
= dictMode
== ZSTD_dictMatchState
?
79 dms
->hashTable
: NULL
;
80 const U32
* const dictHashSmall
= dictMode
== ZSTD_dictMatchState
?
81 dms
->chainTable
: NULL
;
82 const U32 dictStartIndex
= dictMode
== ZSTD_dictMatchState
?
83 dms
->window
.dictLimit
: 0;
84 const BYTE
* const dictBase
= dictMode
== ZSTD_dictMatchState
?
85 dms
->window
.base
: NULL
;
86 const BYTE
* const dictStart
= dictMode
== ZSTD_dictMatchState
?
87 dictBase
+ dictStartIndex
: NULL
;
88 const BYTE
* const dictEnd
= dictMode
== ZSTD_dictMatchState
?
89 dms
->window
.nextSrc
: NULL
;
90 const U32 dictIndexDelta
= dictMode
== ZSTD_dictMatchState
?
91 prefixLowestIndex
- (U32
)(dictEnd
- dictBase
) :
93 const U32 dictHBitsL
= dictMode
== ZSTD_dictMatchState
?
94 dictCParams
->hashLog
: hBitsL
;
95 const U32 dictHBitsS
= dictMode
== ZSTD_dictMatchState
?
96 dictCParams
->chainLog
: hBitsS
;
97 const U32 dictAndPrefixLength
= (U32
)((ip
- prefixLowest
) + (dictEnd
- dictStart
));
99 DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
101 assert(dictMode
== ZSTD_noDict
|| dictMode
== ZSTD_dictMatchState
);
103 /* if a dictionary is attached, it must be within window range */
104 if (dictMode
== ZSTD_dictMatchState
) {
105 assert(ms
->window
.dictLimit
+ (1U << cParams
->windowLog
) >= endIndex
);
109 ip
+= (dictAndPrefixLength
== 0);
110 if (dictMode
== ZSTD_noDict
) {
111 U32
const current
= (U32
)(ip
- base
);
112 U32
const windowLow
= ZSTD_getLowestPrefixIndex(ms
, current
, cParams
->windowLog
);
113 U32
const maxRep
= current
- windowLow
;
114 if (offset_2
> maxRep
) offsetSaved
= offset_2
, offset_2
= 0;
115 if (offset_1
> maxRep
) offsetSaved
= offset_1
, offset_1
= 0;
117 if (dictMode
== ZSTD_dictMatchState
) {
118 /* dictMatchState repCode checks don't currently handle repCode == 0
120 assert(offset_1
<= dictAndPrefixLength
);
121 assert(offset_2
<= dictAndPrefixLength
);
124 /* Main Search Loop */
125 while (ip
< ilimit
) { /* < instead of <=, because repcode check at (ip+1) */
128 size_t const h2
= ZSTD_hashPtr(ip
, hBitsL
, 8);
129 size_t const h
= ZSTD_hashPtr(ip
, hBitsS
, mls
);
130 size_t const dictHL
= ZSTD_hashPtr(ip
, dictHBitsL
, 8);
131 size_t const dictHS
= ZSTD_hashPtr(ip
, dictHBitsS
, mls
);
132 U32
const current
= (U32
)(ip
-base
);
133 U32
const matchIndexL
= hashLong
[h2
];
134 U32 matchIndexS
= hashSmall
[h
];
135 const BYTE
* matchLong
= base
+ matchIndexL
;
136 const BYTE
* match
= base
+ matchIndexS
;
137 const U32 repIndex
= current
+ 1 - offset_1
;
138 const BYTE
* repMatch
= (dictMode
== ZSTD_dictMatchState
139 && repIndex
< prefixLowestIndex
) ?
140 dictBase
+ (repIndex
- dictIndexDelta
) :
142 hashLong
[h2
] = hashSmall
[h
] = current
; /* update hash tables */
144 /* check dictMatchState repcode */
145 if (dictMode
== ZSTD_dictMatchState
146 && ((U32
)((prefixLowestIndex
-1) - repIndex
) >= 3 /* intentional underflow */)
147 && (MEM_read32(repMatch
) == MEM_read32(ip
+1)) ) {
148 const BYTE
* repMatchEnd
= repIndex
< prefixLowestIndex
? dictEnd
: iend
;
149 mLength
= ZSTD_count_2segments(ip
+1+4, repMatch
+4, iend
, repMatchEnd
, prefixLowest
) + 4;
151 ZSTD_storeSeq(seqStore
, (size_t)(ip
-anchor
), anchor
, iend
, 0, mLength
-MINMATCH
);
155 /* check noDict repcode */
156 if ( dictMode
== ZSTD_noDict
157 && ((offset_1
> 0) & (MEM_read32(ip
+1-offset_1
) == MEM_read32(ip
+1)))) {
158 mLength
= ZSTD_count(ip
+1+4, ip
+1+4-offset_1
, iend
) + 4;
160 ZSTD_storeSeq(seqStore
, (size_t)(ip
-anchor
), anchor
, iend
, 0, mLength
-MINMATCH
);
164 if (matchIndexL
> prefixLowestIndex
) {
165 /* check prefix long match */
166 if (MEM_read64(matchLong
) == MEM_read64(ip
)) {
167 mLength
= ZSTD_count(ip
+8, matchLong
+8, iend
) + 8;
168 offset
= (U32
)(ip
-matchLong
);
169 while (((ip
>anchor
) & (matchLong
>prefixLowest
)) && (ip
[-1] == matchLong
[-1])) { ip
--; matchLong
--; mLength
++; } /* catch up */
172 } else if (dictMode
== ZSTD_dictMatchState
) {
173 /* check dictMatchState long match */
174 U32
const dictMatchIndexL
= dictHashLong
[dictHL
];
175 const BYTE
* dictMatchL
= dictBase
+ dictMatchIndexL
;
176 assert(dictMatchL
< dictEnd
);
178 if (dictMatchL
> dictStart
&& MEM_read64(dictMatchL
) == MEM_read64(ip
)) {
179 mLength
= ZSTD_count_2segments(ip
+8, dictMatchL
+8, iend
, dictEnd
, prefixLowest
) + 8;
180 offset
= (U32
)(current
- dictMatchIndexL
- dictIndexDelta
);
181 while (((ip
>anchor
) & (dictMatchL
>dictStart
)) && (ip
[-1] == dictMatchL
[-1])) { ip
--; dictMatchL
--; mLength
++; } /* catch up */
185 if (matchIndexS
> prefixLowestIndex
) {
186 /* check prefix short match */
187 if (MEM_read32(match
) == MEM_read32(ip
)) {
188 goto _search_next_long
;
190 } else if (dictMode
== ZSTD_dictMatchState
) {
191 /* check dictMatchState short match */
192 U32
const dictMatchIndexS
= dictHashSmall
[dictHS
];
193 match
= dictBase
+ dictMatchIndexS
;
194 matchIndexS
= dictMatchIndexS
+ dictIndexDelta
;
196 if (match
> dictStart
&& MEM_read32(match
) == MEM_read32(ip
)) {
197 goto _search_next_long
;
200 ip
+= ((ip
-anchor
) >> kSearchStrength
) + 1;
201 #if defined(__aarch64__)
208 { size_t const hl3
= ZSTD_hashPtr(ip
+1, hBitsL
, 8);
209 size_t const dictHLNext
= ZSTD_hashPtr(ip
+1, dictHBitsL
, 8);
210 U32
const matchIndexL3
= hashLong
[hl3
];
211 const BYTE
* matchL3
= base
+ matchIndexL3
;
212 hashLong
[hl3
] = current
+ 1;
214 /* check prefix long +1 match */
215 if (matchIndexL3
> prefixLowestIndex
) {
216 if (MEM_read64(matchL3
) == MEM_read64(ip
+1)) {
217 mLength
= ZSTD_count(ip
+9, matchL3
+8, iend
) + 8;
219 offset
= (U32
)(ip
-matchL3
);
220 while (((ip
>anchor
) & (matchL3
>prefixLowest
)) && (ip
[-1] == matchL3
[-1])) { ip
--; matchL3
--; mLength
++; } /* catch up */
223 } else if (dictMode
== ZSTD_dictMatchState
) {
224 /* check dict long +1 match */
225 U32
const dictMatchIndexL3
= dictHashLong
[dictHLNext
];
226 const BYTE
* dictMatchL3
= dictBase
+ dictMatchIndexL3
;
227 assert(dictMatchL3
< dictEnd
);
228 if (dictMatchL3
> dictStart
&& MEM_read64(dictMatchL3
) == MEM_read64(ip
+1)) {
229 mLength
= ZSTD_count_2segments(ip
+1+8, dictMatchL3
+8, iend
, dictEnd
, prefixLowest
) + 8;
231 offset
= (U32
)(current
+ 1 - dictMatchIndexL3
- dictIndexDelta
);
232 while (((ip
>anchor
) & (dictMatchL3
>dictStart
)) && (ip
[-1] == dictMatchL3
[-1])) { ip
--; dictMatchL3
--; mLength
++; } /* catch up */
236 /* if no long +1 match, explore the short match we found */
237 if (dictMode
== ZSTD_dictMatchState
&& matchIndexS
< prefixLowestIndex
) {
238 mLength
= ZSTD_count_2segments(ip
+4, match
+4, iend
, dictEnd
, prefixLowest
) + 4;
239 offset
= (U32
)(current
- matchIndexS
);
240 while (((ip
>anchor
) & (match
>dictStart
)) && (ip
[-1] == match
[-1])) { ip
--; match
--; mLength
++; } /* catch up */
242 mLength
= ZSTD_count(ip
+4, match
+4, iend
) + 4;
243 offset
= (U32
)(ip
- match
);
244 while (((ip
>anchor
) & (match
>prefixLowest
)) && (ip
[-1] == match
[-1])) { ip
--; match
--; mLength
++; } /* catch up */
253 ZSTD_storeSeq(seqStore
, (size_t)(ip
-anchor
), anchor
, iend
, offset
+ ZSTD_REP_MOVE
, mLength
-MINMATCH
);
261 /* Complementary insertion */
262 /* done after iLimit test, as candidates could be > iend-8 */
263 { U32
const indexToInsert
= current
+2;
264 hashLong
[ZSTD_hashPtr(base
+indexToInsert
, hBitsL
, 8)] = indexToInsert
;
265 hashLong
[ZSTD_hashPtr(ip
-2, hBitsL
, 8)] = (U32
)(ip
-2-base
);
266 hashSmall
[ZSTD_hashPtr(base
+indexToInsert
, hBitsS
, mls
)] = indexToInsert
;
267 hashSmall
[ZSTD_hashPtr(ip
-1, hBitsS
, mls
)] = (U32
)(ip
-1-base
);
270 /* check immediate repcode */
271 if (dictMode
== ZSTD_dictMatchState
) {
272 while (ip
<= ilimit
) {
273 U32
const current2
= (U32
)(ip
-base
);
274 U32
const repIndex2
= current2
- offset_2
;
275 const BYTE
* repMatch2
= dictMode
== ZSTD_dictMatchState
276 && repIndex2
< prefixLowestIndex
?
277 dictBase
+ repIndex2
- dictIndexDelta
:
279 if ( ((U32
)((prefixLowestIndex
-1) - (U32
)repIndex2
) >= 3 /* intentional overflow */)
280 && (MEM_read32(repMatch2
) == MEM_read32(ip
)) ) {
281 const BYTE
* const repEnd2
= repIndex2
< prefixLowestIndex
? dictEnd
: iend
;
282 size_t const repLength2
= ZSTD_count_2segments(ip
+4, repMatch2
+4, iend
, repEnd2
, prefixLowest
) + 4;
283 U32 tmpOffset
= offset_2
; offset_2
= offset_1
; offset_1
= tmpOffset
; /* swap offset_2 <=> offset_1 */
284 ZSTD_storeSeq(seqStore
, 0, anchor
, iend
, 0, repLength2
-MINMATCH
);
285 hashSmall
[ZSTD_hashPtr(ip
, hBitsS
, mls
)] = current2
;
286 hashLong
[ZSTD_hashPtr(ip
, hBitsL
, 8)] = current2
;
294 if (dictMode
== ZSTD_noDict
) {
295 while ( (ip
<= ilimit
)
297 & (MEM_read32(ip
) == MEM_read32(ip
- offset_2
)) )) {
299 size_t const rLength
= ZSTD_count(ip
+4, ip
+4-offset_2
, iend
) + 4;
300 U32
const tmpOff
= offset_2
; offset_2
= offset_1
; offset_1
= tmpOff
; /* swap offset_2 <=> offset_1 */
301 hashSmall
[ZSTD_hashPtr(ip
, hBitsS
, mls
)] = (U32
)(ip
-base
);
302 hashLong
[ZSTD_hashPtr(ip
, hBitsL
, 8)] = (U32
)(ip
-base
);
303 ZSTD_storeSeq(seqStore
, 0, anchor
, iend
, 0, rLength
-MINMATCH
);
306 continue; /* faster when present ... (?) */
308 } /* while (ip < ilimit) */
310 /* save reps for next block */
311 rep
[0] = offset_1
? offset_1
: offsetSaved
;
312 rep
[1] = offset_2
? offset_2
: offsetSaved
;
314 /* Return the last literals size */
315 return (size_t)(iend
- anchor
);
319 size_t ZSTD_compressBlock_doubleFast(
320 ZSTD_matchState_t
* ms
, seqStore_t
* seqStore
, U32 rep
[ZSTD_REP_NUM
],
321 void const* src
, size_t srcSize
)
323 const U32 mls
= ms
->cParams
.minMatch
;
326 default: /* includes case 3 */
328 return ZSTD_compressBlock_doubleFast_generic(ms
, seqStore
, rep
, src
, srcSize
, 4, ZSTD_noDict
);
330 return ZSTD_compressBlock_doubleFast_generic(ms
, seqStore
, rep
, src
, srcSize
, 5, ZSTD_noDict
);
332 return ZSTD_compressBlock_doubleFast_generic(ms
, seqStore
, rep
, src
, srcSize
, 6, ZSTD_noDict
);
334 return ZSTD_compressBlock_doubleFast_generic(ms
, seqStore
, rep
, src
, srcSize
, 7, ZSTD_noDict
);
339 size_t ZSTD_compressBlock_doubleFast_dictMatchState(
340 ZSTD_matchState_t
* ms
, seqStore_t
* seqStore
, U32 rep
[ZSTD_REP_NUM
],
341 void const* src
, size_t srcSize
)
343 const U32 mls
= ms
->cParams
.minMatch
;
346 default: /* includes case 3 */
348 return ZSTD_compressBlock_doubleFast_generic(ms
, seqStore
, rep
, src
, srcSize
, 4, ZSTD_dictMatchState
);
350 return ZSTD_compressBlock_doubleFast_generic(ms
, seqStore
, rep
, src
, srcSize
, 5, ZSTD_dictMatchState
);
352 return ZSTD_compressBlock_doubleFast_generic(ms
, seqStore
, rep
, src
, srcSize
, 6, ZSTD_dictMatchState
);
354 return ZSTD_compressBlock_doubleFast_generic(ms
, seqStore
, rep
, src
, srcSize
, 7, ZSTD_dictMatchState
);
359 static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
360 ZSTD_matchState_t
* ms
, seqStore_t
* seqStore
, U32 rep
[ZSTD_REP_NUM
],
361 void const* src
, size_t srcSize
,
362 U32
const mls
/* template */)
364 ZSTD_compressionParameters
const* cParams
= &ms
->cParams
;
365 U32
* const hashLong
= ms
->hashTable
;
366 U32
const hBitsL
= cParams
->hashLog
;
367 U32
* const hashSmall
= ms
->chainTable
;
368 U32
const hBitsS
= cParams
->chainLog
;
369 const BYTE
* const istart
= (const BYTE
*)src
;
370 const BYTE
* ip
= istart
;
371 const BYTE
* anchor
= istart
;
372 const BYTE
* const iend
= istart
+ srcSize
;
373 const BYTE
* const ilimit
= iend
- 8;
374 const BYTE
* const base
= ms
->window
.base
;
375 const U32 endIndex
= (U32
)((size_t)(istart
- base
) + srcSize
);
376 const U32 lowLimit
= ZSTD_getLowestMatchIndex(ms
, endIndex
, cParams
->windowLog
);
377 const U32 dictStartIndex
= lowLimit
;
378 const U32 dictLimit
= ms
->window
.dictLimit
;
379 const U32 prefixStartIndex
= (dictLimit
> lowLimit
) ? dictLimit
: lowLimit
;
380 const BYTE
* const prefixStart
= base
+ prefixStartIndex
;
381 const BYTE
* const dictBase
= ms
->window
.dictBase
;
382 const BYTE
* const dictStart
= dictBase
+ dictStartIndex
;
383 const BYTE
* const dictEnd
= dictBase
+ prefixStartIndex
;
384 U32 offset_1
=rep
[0], offset_2
=rep
[1];
386 DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize
);
388 /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
389 if (prefixStartIndex
== dictStartIndex
)
390 return ZSTD_compressBlock_doubleFast_generic(ms
, seqStore
, rep
, src
, srcSize
, mls
, ZSTD_noDict
);
393 while (ip
< ilimit
) { /* < instead of <=, because (ip+1) */
394 const size_t hSmall
= ZSTD_hashPtr(ip
, hBitsS
, mls
);
395 const U32 matchIndex
= hashSmall
[hSmall
];
396 const BYTE
* const matchBase
= matchIndex
< prefixStartIndex
? dictBase
: base
;
397 const BYTE
* match
= matchBase
+ matchIndex
;
399 const size_t hLong
= ZSTD_hashPtr(ip
, hBitsL
, 8);
400 const U32 matchLongIndex
= hashLong
[hLong
];
401 const BYTE
* const matchLongBase
= matchLongIndex
< prefixStartIndex
? dictBase
: base
;
402 const BYTE
* matchLong
= matchLongBase
+ matchLongIndex
;
404 const U32 current
= (U32
)(ip
-base
);
405 const U32 repIndex
= current
+ 1 - offset_1
; /* offset_1 expected <= current +1 */
406 const BYTE
* const repBase
= repIndex
< prefixStartIndex
? dictBase
: base
;
407 const BYTE
* const repMatch
= repBase
+ repIndex
;
409 hashSmall
[hSmall
] = hashLong
[hLong
] = current
; /* update hash table */
411 if ((((U32
)((prefixStartIndex
-1) - repIndex
) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
412 & (offset_1
< current
+1 - dictStartIndex
)) /* note: we are searching at current+1 */
413 && (MEM_read32(repMatch
) == MEM_read32(ip
+1)) ) {
414 const BYTE
* repMatchEnd
= repIndex
< prefixStartIndex
? dictEnd
: iend
;
415 mLength
= ZSTD_count_2segments(ip
+1+4, repMatch
+4, iend
, repMatchEnd
, prefixStart
) + 4;
417 ZSTD_storeSeq(seqStore
, (size_t)(ip
-anchor
), anchor
, iend
, 0, mLength
-MINMATCH
);
419 if ((matchLongIndex
> dictStartIndex
) && (MEM_read64(matchLong
) == MEM_read64(ip
))) {
420 const BYTE
* const matchEnd
= matchLongIndex
< prefixStartIndex
? dictEnd
: iend
;
421 const BYTE
* const lowMatchPtr
= matchLongIndex
< prefixStartIndex
? dictStart
: prefixStart
;
423 mLength
= ZSTD_count_2segments(ip
+8, matchLong
+8, iend
, matchEnd
, prefixStart
) + 8;
424 offset
= current
- matchLongIndex
;
425 while (((ip
>anchor
) & (matchLong
>lowMatchPtr
)) && (ip
[-1] == matchLong
[-1])) { ip
--; matchLong
--; mLength
++; } /* catch up */
428 ZSTD_storeSeq(seqStore
, (size_t)(ip
-anchor
), anchor
, iend
, offset
+ ZSTD_REP_MOVE
, mLength
-MINMATCH
);
430 } else if ((matchIndex
> dictStartIndex
) && (MEM_read32(match
) == MEM_read32(ip
))) {
431 size_t const h3
= ZSTD_hashPtr(ip
+1, hBitsL
, 8);
432 U32
const matchIndex3
= hashLong
[h3
];
433 const BYTE
* const match3Base
= matchIndex3
< prefixStartIndex
? dictBase
: base
;
434 const BYTE
* match3
= match3Base
+ matchIndex3
;
436 hashLong
[h3
] = current
+ 1;
437 if ( (matchIndex3
> dictStartIndex
) && (MEM_read64(match3
) == MEM_read64(ip
+1)) ) {
438 const BYTE
* const matchEnd
= matchIndex3
< prefixStartIndex
? dictEnd
: iend
;
439 const BYTE
* const lowMatchPtr
= matchIndex3
< prefixStartIndex
? dictStart
: prefixStart
;
440 mLength
= ZSTD_count_2segments(ip
+9, match3
+8, iend
, matchEnd
, prefixStart
) + 8;
442 offset
= current
+1 - matchIndex3
;
443 while (((ip
>anchor
) & (match3
>lowMatchPtr
)) && (ip
[-1] == match3
[-1])) { ip
--; match3
--; mLength
++; } /* catch up */
445 const BYTE
* const matchEnd
= matchIndex
< prefixStartIndex
? dictEnd
: iend
;
446 const BYTE
* const lowMatchPtr
= matchIndex
< prefixStartIndex
? dictStart
: prefixStart
;
447 mLength
= ZSTD_count_2segments(ip
+4, match
+4, iend
, matchEnd
, prefixStart
) + 4;
448 offset
= current
- matchIndex
;
449 while (((ip
>anchor
) & (match
>lowMatchPtr
)) && (ip
[-1] == match
[-1])) { ip
--; match
--; mLength
++; } /* catch up */
453 ZSTD_storeSeq(seqStore
, (size_t)(ip
-anchor
), anchor
, iend
, offset
+ ZSTD_REP_MOVE
, mLength
-MINMATCH
);
456 ip
+= ((ip
-anchor
) >> kSearchStrength
) + 1;
460 /* move to next sequence start */
465 /* Complementary insertion */
466 /* done after iLimit test, as candidates could be > iend-8 */
467 { U32
const indexToInsert
= current
+2;
468 hashLong
[ZSTD_hashPtr(base
+indexToInsert
, hBitsL
, 8)] = indexToInsert
;
469 hashLong
[ZSTD_hashPtr(ip
-2, hBitsL
, 8)] = (U32
)(ip
-2-base
);
470 hashSmall
[ZSTD_hashPtr(base
+indexToInsert
, hBitsS
, mls
)] = indexToInsert
;
471 hashSmall
[ZSTD_hashPtr(ip
-1, hBitsS
, mls
)] = (U32
)(ip
-1-base
);
474 /* check immediate repcode */
475 while (ip
<= ilimit
) {
476 U32
const current2
= (U32
)(ip
-base
);
477 U32
const repIndex2
= current2
- offset_2
;
478 const BYTE
* repMatch2
= repIndex2
< prefixStartIndex
? dictBase
+ repIndex2
: base
+ repIndex2
;
479 if ( (((U32
)((prefixStartIndex
-1) - repIndex2
) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
480 & (offset_2
< current2
- dictStartIndex
))
481 && (MEM_read32(repMatch2
) == MEM_read32(ip
)) ) {
482 const BYTE
* const repEnd2
= repIndex2
< prefixStartIndex
? dictEnd
: iend
;
483 size_t const repLength2
= ZSTD_count_2segments(ip
+4, repMatch2
+4, iend
, repEnd2
, prefixStart
) + 4;
484 U32
const tmpOffset
= offset_2
; offset_2
= offset_1
; offset_1
= tmpOffset
; /* swap offset_2 <=> offset_1 */
485 ZSTD_storeSeq(seqStore
, 0, anchor
, iend
, 0, repLength2
-MINMATCH
);
486 hashSmall
[ZSTD_hashPtr(ip
, hBitsS
, mls
)] = current2
;
487 hashLong
[ZSTD_hashPtr(ip
, hBitsL
, 8)] = current2
;
495 /* save reps for next block */
499 /* Return the last literals size */
500 return (size_t)(iend
- anchor
);
504 size_t ZSTD_compressBlock_doubleFast_extDict(
505 ZSTD_matchState_t
* ms
, seqStore_t
* seqStore
, U32 rep
[ZSTD_REP_NUM
],
506 void const* src
, size_t srcSize
)
508 U32
const mls
= ms
->cParams
.minMatch
;
511 default: /* includes case 3 */
513 return ZSTD_compressBlock_doubleFast_extDict_generic(ms
, seqStore
, rep
, src
, srcSize
, 4);
515 return ZSTD_compressBlock_doubleFast_extDict_generic(ms
, seqStore
, rep
, src
, srcSize
, 5);
517 return ZSTD_compressBlock_doubleFast_extDict_generic(ms
, seqStore
, rep
, src
, srcSize
, 6);
519 return ZSTD_compressBlock_doubleFast_extDict_generic(ms
, seqStore
, rep
, src
, srcSize
, 7);