/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011 - 2016, Yann Collet.
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 *	Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */
/*-************************************
 *	Dependencies
 **************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>

static const int LZ4_minLength = (MFLIMIT + 1);
static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));

/*-******************************
 *	Compression functions
 ********************************/
static FORCE_INLINE U32 LZ4_hash4(
	U32 sequence,
	tableType_t const tableType)
{
	if (tableType == byU16)
		return ((sequence * 2654435761U)
			>> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
	else
		return ((sequence * 2654435761U)
			>> ((MINMATCH * 8) - LZ4_HASHLOG));
}

static FORCE_INLINE U32 LZ4_hash5(
	U64 sequence,
	tableType_t const tableType)
{
	const U32 hashLog = (tableType == byU16)
		? LZ4_HASHLOG + 1
		: LZ4_HASHLOG;

#if LZ4_LITTLE_ENDIAN
	static const U64 prime5bytes = 889523592379ULL;

	return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
#else
	static const U64 prime8bytes = 11400714785074694791ULL;

	return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
#endif
}

static FORCE_INLINE U32 LZ4_hashPosition(
	const void *p,
	tableType_t const tableType)
{
#if LZ4_ARCH64
	if (tableType == byU32)
		return LZ4_hash5(LZ4_read_ARCH(p), tableType);
#endif

	return LZ4_hash4(LZ4_read32(p), tableType);
}
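
/*
 * Note on the hash functions above: both use multiplicative
 * (Fibonacci-style) hashing and keep only the top LZ4_HASHLOG
 * (or LZ4_HASHLOG + 1) bits of the product as the table index.
 * A minimal sketch of the 4-byte case, assuming MINMATCH == 4 and
 * LZ4_HASHLOG == 12 as defined in lz4defs.h (values are assumptions,
 * not restated here):
 *
 *	U32 seq = LZ4_read32(p);		// next 4 input bytes as a word
 *	U32 h = (seq * 2654435761U) >> (32 - 12); // keep top 12 bits: 0..4095
 *
 * Nearby but distinct 4-byte sequences therefore scatter across the
 * hash table, and h is used as the index into the match table below.
 */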

static void LZ4_putPositionOnHash(
	const BYTE *p,
	U32 h,
	void *tableBase,
	tableType_t const tableType,
	const BYTE *srcBase)
{
	switch (tableType) {
	case byPtr:
	{
		const BYTE **hashTable = (const BYTE **)tableBase;

		hashTable[h] = p;
		return;
	}
	case byU32:
	{
		U32 *hashTable = (U32 *) tableBase;

		hashTable[h] = (U32)(p - srcBase);
		return;
	}
	case byU16:
	{
		U16 *hashTable = (U16 *) tableBase;

		hashTable[h] = (U16)(p - srcBase);
		return;
	}
	}
}

static FORCE_INLINE void LZ4_putPosition(
	const BYTE *p,
	void *tableBase,
	tableType_t tableType,
	const BYTE *srcBase)
{
	U32 const h = LZ4_hashPosition(p, tableType);

	LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}

static const BYTE *LZ4_getPositionOnHash(
	U32 h,
	void *tableBase,
	tableType_t tableType,
	const BYTE *srcBase)
{
	if (tableType == byPtr) {
		const BYTE **hashTable = (const BYTE **) tableBase;

		return hashTable[h];
	}

	if (tableType == byU32) {
		const U32 * const hashTable = (U32 *) tableBase;

		return hashTable[h] + srcBase;
	}

	{
		/* default, to ensure a return */
		const U16 * const hashTable = (U16 *) tableBase;

		return hashTable[h] + srcBase;
	}
}

static FORCE_INLINE const BYTE *LZ4_getPosition(
	const BYTE *p,
	void *tableBase,
	tableType_t tableType,
	const BYTE *srcBase)
{
	U32 const h = LZ4_hashPosition(p, tableType);

	return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
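
/*
 * The put/get helpers above give one hash-table layout per tableType:
 * byPtr stores raw pointers, while byU32/byU16 store offsets relative to
 * srcBase so an entry fits in 4 or 2 bytes. A rough round-trip sketch for
 * byU32 (variable names here are only illustrative):
 *
 *	U32 h = LZ4_hashPosition(p, byU32);
 *	LZ4_putPositionOnHash(p, h, table, byU32, srcBase); // stores p - srcBase
 *	q = LZ4_getPositionOnHash(h, table, byU32, srcBase); // srcBase + offset
 *
 * so q == p as long as the slot has not been overwritten by a later
 * position that hashed to the same index.
 */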

/*
 * LZ4_compress_generic() :
 * inlined, to ensure branches are decided at compilation time
 */
static FORCE_INLINE int LZ4_compress_generic(
	LZ4_stream_t_internal * const dictPtr,
	const char * const source,
	char * const dest,
	const int inputSize,
	const int maxOutputSize,
	const limitedOutput_directive outputLimited,
	const tableType_t tableType,
	const dict_directive dict,
	const dictIssue_directive dictIssue,
	const U32 acceleration)
{
	const BYTE *ip = (const BYTE *) source;
	const BYTE *base;
	const BYTE *lowLimit;
	const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
	const BYTE * const dictionary = dictPtr->dictionary;
	const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
	const size_t dictDelta = dictEnd - (const BYTE *)source;
	const BYTE *anchor = (const BYTE *) source;
	const BYTE * const iend = ip + inputSize;
	const BYTE * const mflimit = iend - MFLIMIT;
	const BYTE * const matchlimit = iend - LASTLITERALS;

	BYTE *op = (BYTE *) dest;
	BYTE * const olimit = op + maxOutputSize;

	U32 forwardH;
	size_t refDelta = 0;

	/* Init conditions */
	if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
		/* Unsupported inputSize, too large (or negative) */
		return 0;
	}

	switch (dict) {
	case noDict:
	default:
		base = (const BYTE *)source;
		lowLimit = (const BYTE *)source;
		break;
	case withPrefix64k:
		base = (const BYTE *)source - dictPtr->currentOffset;
		lowLimit = (const BYTE *)source - dictPtr->dictSize;
		break;
	case usingExtDict:
		base = (const BYTE *)source - dictPtr->currentOffset;
		lowLimit = (const BYTE *)source;
		break;
	}

	if ((tableType == byU16)
		&& (inputSize >= LZ4_64Klimit)) {
		/* Size too large (not within 64K limit) */
		return 0;
	}

	if (inputSize < LZ4_minLength) {
		/* Input too small, no compression (all literals) */
		goto _last_literals;
	}

	/* First Byte */
	LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
	ip++;
	forwardH = LZ4_hashPosition(ip, tableType);

	/* Main Loop */
	for ( ; ; ) {
		const BYTE *match;
		BYTE *token;

		/* Find a match */
		{
			const BYTE *forwardIp = ip;
			unsigned int step = 1;
			unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;

			do {
				U32 const h = forwardH;

				ip = forwardIp;
				forwardIp += step;
				step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);

				if (unlikely(forwardIp > mflimit))
					goto _last_literals;

				match = LZ4_getPositionOnHash(h,
					dictPtr->hashTable,
					tableType, base);

				if (dict == usingExtDict) {
					if (match < (const BYTE *)source) {
						refDelta = dictDelta;
						lowLimit = dictionary;
					} else {
						refDelta = 0;
						lowLimit = (const BYTE *)source;
					}
				}

				forwardH = LZ4_hashPosition(forwardIp,
					tableType);

				LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
					tableType, base);
			} while (((dictIssue == dictSmall)
					? (match < lowRefLimit)
					: 0)
				|| ((tableType == byU16)
					? 0
					: (match + MAX_DISTANCE < ip))
				|| (LZ4_read32(match + refDelta)
					!= LZ4_read32(ip)));
		}

		/* Catch up */
		while (((ip > anchor) & (match + refDelta > lowLimit))
			&& (unlikely(ip[-1] == match[refDelta - 1]))) {
			ip--;
			match--;
		}

		/* Encode Literals */
		{
			unsigned const int litLength = (unsigned int)(ip - anchor);

			token = op++;

			if ((outputLimited) &&
				/* Check output buffer overflow */
				(unlikely(op + litLength +
					(2 + 1 + LASTLITERALS) +
					(litLength / 255) > olimit)))
				return 0;

			if (litLength >= RUN_MASK) {
				int len = (int)litLength - RUN_MASK;

				*token = (RUN_MASK << ML_BITS);

				for (; len >= 255; len -= 255)
					*op++ = 255;
				*op++ = (BYTE)len;
			} else
				*token = (BYTE)(litLength << ML_BITS);

			/* Copy Literals */
			LZ4_wildCopy(op, anchor, op + litLength);
			op += litLength;
		}

_next_match:
		/* Encode Offset */
		LZ4_writeLE16(op, (U16)(ip - match));
		op += 2;

		/* Encode MatchLength */
		{
			unsigned int matchCode;

			if ((dict == usingExtDict)
				&& (lowLimit == dictionary)) {
				const BYTE *limit;

				match += refDelta;
				limit = ip + (dictEnd - match);

				if (limit > matchlimit)
					limit = matchlimit;

				matchCode = LZ4_count(ip + MINMATCH,
					match + MINMATCH, limit);

				ip += MINMATCH + matchCode;

				if (ip == limit) {
					unsigned const int more = LZ4_count(ip,
						(const BYTE *)source,
						matchlimit);

					matchCode += more;
					ip += more;
				}
			} else {
				matchCode = LZ4_count(ip + MINMATCH,
					match + MINMATCH, matchlimit);
				ip += MINMATCH + matchCode;
			}

			if (outputLimited &&
				/* Check output buffer overflow */
				(unlikely(op +
					(1 + LASTLITERALS) +
					(matchCode >> 8) > olimit)))
				return 0;

			if (matchCode >= ML_MASK) {
				*token += ML_MASK;
				matchCode -= ML_MASK;
				LZ4_write32(op, 0xFFFFFFFF);

				while (matchCode >= 4 * 255) {
					op += 4;
					LZ4_write32(op, 0xFFFFFFFF);
					matchCode -= 4 * 255;
				}

				op += matchCode / 255;
				*op++ = (BYTE)(matchCode % 255);
			} else
				*token += (BYTE)(matchCode);
		}

		anchor = ip;

		/* Test end of chunk */
		if (ip > mflimit)
			break;

		/* Fill table */
		LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);

		/* Test next position */
		match = LZ4_getPosition(ip, dictPtr->hashTable,
			tableType, base);

		if (dict == usingExtDict) {
			if (match < (const BYTE *)source) {
				refDelta = dictDelta;
				lowLimit = dictionary;
			} else {
				refDelta = 0;
				lowLimit = (const BYTE *)source;
			}
		}

		LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);

		if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
			&& (match + MAX_DISTANCE >= ip)
			&& (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
			token = op++;
			*token = 0;
			goto _next_match;
		}

		/* Prepare next loop */
		forwardH = LZ4_hashPosition(++ip, tableType);
	}

_last_literals:
	/* Encode Last Literals */
	{
		size_t const lastRun = (size_t)(iend - anchor);

		if ((outputLimited) &&
			/* Check output buffer overflow */
			((op - (BYTE *)dest) + lastRun + 1 +
			((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
			return 0;

		if (lastRun >= RUN_MASK) {
			size_t accumulator = lastRun - RUN_MASK;

			*op++ = RUN_MASK << ML_BITS;
			for (; accumulator >= 255; accumulator -= 255)
				*op++ = 255;
			*op++ = (BYTE) accumulator;
		} else {
			*op++ = (BYTE)(lastRun << ML_BITS);
		}

		LZ4_memcpy(op, anchor, lastRun);
		op += lastRun;
	}

	/* End */
	return (int) (((char *)op) - dest);
}

static int LZ4_compress_fast_extState(
	void *state,
	const char *source,
	char *dest,
	int inputSize,
	int maxOutputSize,
	int acceleration)
{
	LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
#if LZ4_ARCH64
	const tableType_t tableType = byU32;
#else
	const tableType_t tableType = byPtr;
#endif

	LZ4_resetStream((LZ4_stream_t *)state);

	if (acceleration < 1)
		acceleration = LZ4_ACCELERATION_DEFAULT;

	if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
		if (inputSize < LZ4_64Klimit)
			return LZ4_compress_generic(ctx, source,
				dest, inputSize, 0,
				noLimit, byU16, noDict,
				noDictIssue, acceleration);
		else
			return LZ4_compress_generic(ctx, source,
				dest, inputSize, 0,
				noLimit, tableType, noDict,
				noDictIssue, acceleration);
	} else {
		if (inputSize < LZ4_64Klimit)
			return LZ4_compress_generic(ctx, source,
				dest, inputSize,
				maxOutputSize, limitedOutput, byU16, noDict,
				noDictIssue, acceleration);
		else
			return LZ4_compress_generic(ctx, source,
				dest, inputSize,
				maxOutputSize, limitedOutput, tableType, noDict,
				noDictIssue, acceleration);
	}
}

int LZ4_compress_fast(const char *source, char *dest, int inputSize,
	int maxOutputSize, int acceleration, void *wrkmem)
{
	return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
		maxOutputSize, acceleration);
}
EXPORT_SYMBOL(LZ4_compress_fast);

int LZ4_compress_default(const char *source, char *dest, int inputSize,
	int maxOutputSize, void *wrkmem)
{
	return LZ4_compress_fast(source, dest, inputSize,
		maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
}
EXPORT_SYMBOL(LZ4_compress_default);
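
/*
 * Usage sketch for the one-shot API above (illustrative only; buffer names
 * are hypothetical). Callers supply LZ4_MEM_COMPRESS bytes of workspace and
 * can size the destination with LZ4_COMPRESSBOUND(), both from <linux/lz4.h>:
 *
 *	void *wrkmem = kvmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
 *	char *dst = kvmalloc(LZ4_COMPRESSBOUND(src_len), GFP_KERNEL);
 *	int out_len;
 *
 *	if (!wrkmem || !dst)
 *		goto out;			// error handling elided
 *	out_len = LZ4_compress_default(src, dst, src_len,
 *				       LZ4_COMPRESSBOUND(src_len), wrkmem);
 *	if (out_len == 0)
 *		goto out;			// did not fit / invalid input
 *
 * A return value of 0 signals failure; otherwise it is the number of bytes
 * written to dst.
 */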

/*-******************************
 *	*_destSize() variant
 ********************************/
static int LZ4_compress_destSize_generic(
	LZ4_stream_t_internal * const ctx,
	const char * const src,
	char * const dst,
	int * const srcSizePtr,
	const int targetDstSize,
	const tableType_t tableType)
{
	const BYTE *ip = (const BYTE *) src;
	const BYTE *base = (const BYTE *) src;
	const BYTE *lowLimit = (const BYTE *) src;
	const BYTE *anchor = ip;
	const BYTE * const iend = ip + *srcSizePtr;
	const BYTE * const mflimit = iend - MFLIMIT;
	const BYTE * const matchlimit = iend - LASTLITERALS;

	BYTE *op = (BYTE *) dst;
	BYTE * const oend = op + targetDstSize;
	BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
		- 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
	BYTE * const oMaxMatch = op + targetDstSize
		- (LASTLITERALS + 1 /* token */);
	BYTE * const oMaxSeq = oMaxLit - 1 /* token */;

	U32 forwardH;

	/* Init conditions */
	/* Impossible to store anything */
	if (targetDstSize < 1)
		return 0;
	/* Unsupported input size, too large (or negative) */
	if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
		return 0;
	/* Size too large (not within 64K limit) */
	if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
		return 0;
	/* Input too small, no compression (all literals) */
	if (*srcSizePtr < LZ4_minLength)
		goto _last_literals;

	/* First Byte */
	*srcSizePtr = 0;
	LZ4_putPosition(ip, ctx->hashTable, tableType, base);
	ip++; forwardH = LZ4_hashPosition(ip, tableType);

	/* Main Loop */
	for ( ; ; ) {
		const BYTE *match;
		BYTE *token;

		/* Find a match */
		{
			const BYTE *forwardIp = ip;
			unsigned int step = 1;
			unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;

			do {
				U32 h = forwardH;

				ip = forwardIp;
				forwardIp += step;
				step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);

				if (unlikely(forwardIp > mflimit))
					goto _last_literals;

				match = LZ4_getPositionOnHash(h, ctx->hashTable,
					tableType, base);
				forwardH = LZ4_hashPosition(forwardIp,
					tableType);
				LZ4_putPositionOnHash(ip, h,
					ctx->hashTable, tableType,
					base);
			} while (((tableType == byU16)
				? 0
				: (match + MAX_DISTANCE < ip))
				|| (LZ4_read32(match) != LZ4_read32(ip)));
		}

		/* Catch up */
		while ((ip > anchor)
			&& (match > lowLimit)
			&& (unlikely(ip[-1] == match[-1]))) {
			ip--;
			match--;
		}

		/* Encode Literal length */
		{
			unsigned int litLength = (unsigned int)(ip - anchor);

			token = op++;
			if (op + ((litLength + 240) / 255)
				+ litLength > oMaxLit) {
				/* Not enough space for a last match */
				op--;
				goto _last_literals;
			}
			if (litLength >= RUN_MASK) {
				unsigned int len = litLength - RUN_MASK;

				*token = (RUN_MASK << ML_BITS);
				for (; len >= 255; len -= 255)
					*op++ = 255;
				*op++ = (BYTE)len;
			} else
				*token = (BYTE)(litLength << ML_BITS);

			/* Copy Literals */
			LZ4_wildCopy(op, anchor, op + litLength);
			op += litLength;
		}

_next_match:
		/* Encode Offset */
		LZ4_writeLE16(op, (U16)(ip - match)); op += 2;

		/* Encode MatchLength */
		{
			size_t matchLength = LZ4_count(ip + MINMATCH,
				match + MINMATCH, matchlimit);

			if (op + ((matchLength + 240) / 255) > oMaxMatch) {
				/* Match description too long : reduce it */
				matchLength = (15 - 1) + (oMaxMatch - op) * 255;
			}
			ip += MINMATCH + matchLength;

			if (matchLength >= ML_MASK) {
				*token += ML_MASK;
				matchLength -= ML_MASK;
				while (matchLength >= 255) {
					matchLength -= 255;
					*op++ = 255;
				}
				*op++ = (BYTE)matchLength;
			} else
				*token += (BYTE)(matchLength);
		}

		anchor = ip;

		/* Test end of block */
		if (ip > mflimit)
			break;
		if (op > oMaxSeq)
			break;

		/* Fill table */
		LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);

		/* Test next position */
		match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
		LZ4_putPosition(ip, ctx->hashTable, tableType, base);

		if ((match + MAX_DISTANCE >= ip)
			&& (LZ4_read32(match) == LZ4_read32(ip))) {
			token = op++; *token = 0;
			goto _next_match;
		}

		/* Prepare next loop */
		forwardH = LZ4_hashPosition(++ip, tableType);
	}

_last_literals:
	/* Encode Last Literals */
	{
		size_t lastRunSize = (size_t)(iend - anchor);

		if (op + 1 /* token */
			+ ((lastRunSize + 240) / 255) /* litLength */
			+ lastRunSize /* literals */ > oend) {
			/* adapt lastRunSize to fill 'dst' */
			lastRunSize = (oend - op) - 1;
			lastRunSize -= (lastRunSize + 240) / 255;
		}
		ip = anchor + lastRunSize;

		if (lastRunSize >= RUN_MASK) {
			size_t accumulator = lastRunSize - RUN_MASK;

			*op++ = RUN_MASK << ML_BITS;
			for (; accumulator >= 255; accumulator -= 255)
				*op++ = 255;
			*op++ = (BYTE) accumulator;
		} else {
			*op++ = (BYTE)(lastRunSize << ML_BITS);
		}
		LZ4_memcpy(op, anchor, lastRunSize);
		op += lastRunSize;
	}

	/* End */
	*srcSizePtr = (int) (((const char *)ip) - src);
	return (int) (((char *)op) - dst);
}

static int LZ4_compress_destSize_extState(
	LZ4_stream_t *state,
	const char *src,
	char *dst,
	int *srcSizePtr,
	int targetDstSize)
{
#if LZ4_ARCH64
	const tableType_t tableType = byU32;
#else
	const tableType_t tableType = byPtr;
#endif

	LZ4_resetStream(state);

	if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
		/* compression success is guaranteed */
		return LZ4_compress_fast_extState(
			state, src, dst, *srcSizePtr,
			targetDstSize, 1);
	} else {
		if (*srcSizePtr < LZ4_64Klimit)
			return LZ4_compress_destSize_generic(
				&state->internal_donotuse,
				src, dst, srcSizePtr,
				targetDstSize, byU16);
		else
			return LZ4_compress_destSize_generic(
				&state->internal_donotuse,
				src, dst, srcSizePtr,
				targetDstSize, tableType);
	}
}

int LZ4_compress_destSize(
	const char *src,
	char *dst,
	int *srcSizePtr,
	int targetDstSize,
	void *wrkmem)
{
	return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
		targetDstSize);
}
EXPORT_SYMBOL(LZ4_compress_destSize);
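
/*
 * Usage sketch for the _destSize() variant (illustrative only; names are
 * hypothetical): instead of bounding the output by the input, it fills a
 * fixed-size destination and reports back how much input it could consume:
 *
 *	int src_len = src_total;		// in: bytes available in src
 *	int out_len = LZ4_compress_destSize(src, dst, &src_len,
 *					    dst_capacity, wrkmem);
 *	// out_len: compressed bytes written, at most dst_capacity
 *	// src_len: bytes of src actually compressed, possibly < src_total
 */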

/*-******************************
 *	Streaming functions
 ********************************/
void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
{
	memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
}
EXPORT_SYMBOL(LZ4_resetStream);

int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
	const char *dictionary, int dictSize)
{
	LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
	const BYTE *p = (const BYTE *)dictionary;
	const BYTE * const dictEnd = p + dictSize;
	const BYTE *base;

	if ((dict->initCheck)
		|| (dict->currentOffset > 1 * GB)) {
		/* Uninitialized structure, or reuse overflow */
		LZ4_resetStream(LZ4_dict);
	}

	if (dictSize < (int)HASH_UNIT) {
		dict->dictionary = NULL;
		dict->dictSize = 0;
		return 0;
	}

	if ((dictEnd - p) > 64 * KB)
		p = dictEnd - 64 * KB;
	dict->currentOffset += 64 * KB;
	base = p - dict->currentOffset;
	dict->dictionary = p;
	dict->dictSize = (U32)(dictEnd - p);
	dict->currentOffset += dict->dictSize;

	while (p <= dictEnd - HASH_UNIT) {
		LZ4_putPosition(p, dict->hashTable, byU32, base);
		p += 3;
	}

	return dict->dictSize;
}
EXPORT_SYMBOL(LZ4_loadDict);

static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
	const BYTE *src)
{
	if ((LZ4_dict->currentOffset > 0x80000000) ||
		((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
		/* address space overflow */
		/* rescale hash table */
		U32 const delta = LZ4_dict->currentOffset - 64 * KB;
		const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
		int i;

		for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
			if (LZ4_dict->hashTable[i] < delta)
				LZ4_dict->hashTable[i] = 0;
			else
				LZ4_dict->hashTable[i] -= delta;
		}
		LZ4_dict->currentOffset = 64 * KB;
		if (LZ4_dict->dictSize > 64 * KB)
			LZ4_dict->dictSize = 64 * KB;
		LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
	}
}

int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
{
	LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
	const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;

	if ((U32)dictSize > 64 * KB) {
		/* useless to define a dictionary > 64 * KB */
		dictSize = 64 * KB;
	}
	if ((U32)dictSize > dict->dictSize)
		dictSize = dict->dictSize;

	memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

	dict->dictionary = (const BYTE *)safeBuffer;
	dict->dictSize = (U32)dictSize;

	return dictSize;
}
EXPORT_SYMBOL(LZ4_saveDict);

int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
	char *dest, int inputSize, int maxOutputSize, int acceleration)
{
	LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
	const BYTE * const dictEnd = streamPtr->dictionary
		+ streamPtr->dictSize;
	const BYTE *smallest = (const BYTE *) source;

	if (streamPtr->initCheck) {
		/* Uninitialized structure detected */
		return 0;
	}

	if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
		smallest = dictEnd;

	LZ4_renormDictT(streamPtr, smallest);

	if (acceleration < 1)
		acceleration = LZ4_ACCELERATION_DEFAULT;

	/* Check overlapping input/dictionary space */
	{
		const BYTE *sourceEnd = (const BYTE *) source + inputSize;

		if ((sourceEnd > streamPtr->dictionary)
			&& (sourceEnd < dictEnd)) {
			streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
			if (streamPtr->dictSize > 64 * KB)
				streamPtr->dictSize = 64 * KB;
			if (streamPtr->dictSize < 4)
				streamPtr->dictSize = 0;
			streamPtr->dictionary = dictEnd - streamPtr->dictSize;
		}
	}

	/* prefix mode : source data follows dictionary */
	if (dictEnd == (const BYTE *)source) {
		int result;

		if ((streamPtr->dictSize < 64 * KB) &&
			(streamPtr->dictSize < streamPtr->currentOffset)) {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				withPrefix64k, dictSmall, acceleration);
		} else {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				withPrefix64k, noDictIssue, acceleration);
		}
		streamPtr->dictSize += (U32)inputSize;
		streamPtr->currentOffset += (U32)inputSize;
		return result;
	}

	/* external dictionary mode */
	{
		int result;

		if ((streamPtr->dictSize < 64 * KB) &&
			(streamPtr->dictSize < streamPtr->currentOffset)) {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				usingExtDict, dictSmall, acceleration);
		} else {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				usingExtDict, noDictIssue, acceleration);
		}
		streamPtr->dictionary = (const BYTE *)source;
		streamPtr->dictSize = (U32)inputSize;
		streamPtr->currentOffset += (U32)inputSize;
		return result;
	}
}
EXPORT_SYMBOL(LZ4_compress_fast_continue);
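
/*
 * Streaming usage sketch (illustrative only; buffer and helper names are
 * hypothetical): reset the stream once, optionally preload a dictionary,
 * then feed consecutive blocks so later blocks may reference earlier
 * history, and save the dictionary when the history buffer is about to be
 * reused:
 *
 *	LZ4_stream_t *stream = kvmalloc(sizeof(*stream), GFP_KERNEL);
 *	int n;
 *
 *	LZ4_resetStream(stream);
 *	LZ4_loadDict(stream, dict_buf, dict_len);	// optional
 *	while (next_block(&blk, &blk_len)) {
 *		n = LZ4_compress_fast_continue(stream, blk, out, blk_len,
 *					       out_max,
 *					       LZ4_ACCELERATION_DEFAULT);
 *		if (n == 0)
 *			break;				// output did not fit
 *	}
 *	LZ4_saveDict(stream, dict_buf, dict_buf_size);
 */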

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 compressor");