/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011-2013, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
 * - LZ4 source repository : http://code.google.com/p/lz4/
 */
#include <sys/types.h>
#include <sys/byteorder.h>
#include <assert.h>
#include <string.h>
#include <umem.h>

size_t lz4_compress(void *, void *, size_t, size_t, int);
int lz4_decompress(void *, void *, size_t, size_t, int);
static int real_LZ4_compress(const char *source, char *dest, int isize,
    int osize);
static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
    int isize, int maxOutputSize);
static int LZ4_compressCtx(void *ctx, const char *source, char *dest,
    int isize, int osize);
static int LZ4_compress64kCtx(void *ctx, const char *source, char *dest,
    int isize, int osize);
size_t
lz4_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
	uint32_t bufsiz;
	char *dest = d_start;

	assert(d_len >= sizeof (bufsiz));

	bufsiz = real_LZ4_compress(s_start, &dest[sizeof (bufsiz)], s_len,
	    d_len - sizeof (bufsiz));

	/* Signal an error if the compression routine returned zero. */
	if (bufsiz == 0)
		return (s_len);

	/*
	 * Encode the compressed buffer size at the start. We'll need this in
	 * decompression to counter the effects of padding which might be
	 * added to the compressed buffer and which, if unhandled, would
	 * confuse the hell out of our decompression function.
	 */
	*(uint32_t *)dest = BE_32(bufsiz);

	return (bufsiz + sizeof (bufsiz));
}

int
lz4_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
	const char *src = s_start;
	uint32_t bufsiz = BE_IN32(src);

	/* invalid compressed buffer size encoded at start */
	if (bufsiz + sizeof (bufsiz) > s_len)
		return (1);

	/*
	 * Returns 0 on success (decompression function returned non-negative)
	 * and non-zero on failure (decompression function returned negative).
	 */
	return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
	    d_start, bufsiz, d_len) < 0);
}
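
/*
 * Illustrative sketch (not part of the original source): one way a caller
 * might drive the two wrappers above. The function name, buffer sizes and
 * the 4 KB limit are assumptions made for this example only; in ZFS the
 * real callers are the zio compression paths.
 */
#if 0
static int
lz4_roundtrip_example(void *src, size_t src_len, void *dst, size_t dst_len)
{
	/* room for the 4-byte header plus the (worst-case) payload */
	char cbuf[4096 + sizeof (uint32_t)];
	size_t clen;

	if (src_len > 4096)
		return (-1);	/* outside this example's assumption */

	/* lz4_compress() returns src_len when the data did not compress */
	clen = lz4_compress(src, cbuf, src_len, sizeof (cbuf), 0);
	if (clen >= src_len)
		return (-1);	/* store the block uncompressed instead */

	/* lz4_decompress() returns 0 on success, non-zero on failure */
	if (lz4_decompress(cbuf, dst, clen, dst_len, 0) != 0)
		return (-1);	/* corrupt or truncated stream */

	return (0);
}
#endif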

/*
 * LZ4 API Description:
 *
 * real_LZ4_compress() :
 *	isize  : is the input size. Max supported value is ~1.9GB
 *	return : the number of bytes written in buffer dest
 *		 or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
 *	note : destination buffer must be already allocated.
 *		destination buffer must be sized to handle worst-case
 *		situations (input data not compressible).
 *
 * LZ4_uncompress_unknownOutputSize() :
 *	isize  : is the input size, therefore the compressed size
 *	maxOutputSize : is the size of the destination buffer (which must be
 *		already allocated)
 *	return : the number of bytes decoded in the destination buffer
 *		(necessarily <= maxOutputSize). If the source stream is
 *		malformed, the function will stop decoding and return a
 *		negative result, indicating the byte position of the faulty
 *		instruction. This function never writes beyond dest +
 *		maxOutputSize, and is therefore protected against malicious
 *		data packets.
 *	note   : Destination buffer must be already allocated.
 *
 * LZ4_compressCtx() :
 *	This function explicitly handles the CTX memory structure.
 *
 *	ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
 *	by the caller (either on the stack or using kmem_zalloc). Passing NULL
 *	isn't valid.
 *
 * LZ4_compress64kCtx() :
 *	Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
 *	isize *Must* be <64KB, otherwise the output will be corrupted.
 *
 *	ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
 *	by the caller (either on the stack or using kmem_zalloc). Passing NULL
 *	isn't valid.
 */
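
/*
 * Illustrative sketch (not part of the original source): with the ILLUMOS
 * change above, a direct caller of LZ4_compressCtx()/LZ4_compress64kCtx()
 * owns the scratch context. This mirrors what real_LZ4_compress() does
 * further down; the function name here is hypothetical, and the userland
 * allocator (umem_zalloc) is the one this file itself uses.
 */
#if 0
static int
example_compress_with_ctx(const char *src, char *dst, int isize, int osize)
{
	void *ctx = umem_zalloc(sizeof (struct refTables), UMEM_DEFAULT);
	int result;

	if (ctx == NULL)
		return (0);	/* no memory: report "no compression" */

	/* the 64k variant is only valid for inputs smaller than 64KB */
	if (isize < (64 * 1024))
		result = LZ4_compress64kCtx(ctx, src, dst, isize, osize);
	else
		result = LZ4_compressCtx(ctx, src, dst, isize, osize);

	umem_free(ctx, sizeof (struct refTables));
	return (result);
}
#endif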

/*
 * COMPRESSIONLEVEL: Increasing this value improves compression ratio.
 *	Lowering this value reduces memory usage. Reduced memory usage
 *	typically improves speed, due to cache effect (ex: L1 32KB for Intel,
 *	L1 64KB for AMD). Memory usage formula : N->2^(N+2) Bytes
 *	(examples : 12 -> 16KB ; 17 -> 512KB)
 */
#define COMPRESSIONLEVEL 12

/*
 * NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value makes the algorithm
 *	skip data segments it considers "incompressible" sooner. This may
 *	decrease compression ratio dramatically, but will be faster on
 *	incompressible data. Increasing this value makes the algorithm search
 *	longer before declaring a segment "incompressible". This could improve
 *	compression a bit, but will be slower on incompressible data. The
 *	default value (6) is recommended.
 */
#define NOTCOMPRESSIBLE_CONFIRMATION 6
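
/*
 * Illustrative note (not part of the original comments): SKIPSTRENGTH
 * (defined below) is derived from NOTCOMPRESSIBLE_CONFIRMATION. In the
 * match-finding loops the probe distance is
 * "findMatchAttempts++ >> skipStrength", so after roughly 2^SKIPSTRENGTH
 * failed probes the search starts striding over the input, which is what
 * makes incompressible data cheap to skip.
 */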

/*
 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This will provide a boost to
 *	performance on big-endian CPUs, but the resulting compressed stream
 *	will be incompatible with little-endian CPUs. You can set this option
 *	to 1 in situations where data will stay within a closed environment.
 *	This option is useless on little-endian CPUs (such as x86).
 */
/* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */

/*
 * CPU Feature Detection
 */

/* 32 or 64 bits ? */
#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || \
	defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || \
	defined(__LP64__) || defined(_LP64))
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif

/*
 * Limits the amount of stack space that the algorithm may consume to hold
 * the compression lookup table. The value `9' here means we'll never use
 * more than 2k of stack (see above for a description of COMPRESSIONLEVEL).
 * If more memory is needed, it is allocated from the heap.
 */
#define STACKLIMIT 9
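
/*
 * Illustrative note (not part of the original comments): by the memory
 * formula above, a STACKLIMIT of 9 allows at most 2^(9+2) = 2048 bytes of
 * stack for the table, which is where the "2k" figure comes from. Since
 * COMPRESSIONLEVEL is 12 (> 9), HEAPMODE below evaluates to 1 and the lookup
 * table is allocated from the heap instead.
 */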

/*
 * Little Endian or Big Endian?
 * Note: overwrite the below #define if you know your architecture endianness.
 */
#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || \
	defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || \
	defined(__PPC) || defined(PPC) || defined(__powerpc__) || \
	defined(__powerpc) || defined(powerpc) || \
	((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))))
#define LZ4_BIG_ENDIAN 1
#else
/*
 * Little Endian assumed. PDP Endian and other very rare endian formats
 * are unsupported.
 */
#endif

/*
 * Unaligned memory access is automatically enabled for "common" CPUs,
 * such as x86. For other CPUs, the compiler will be more cautious and
 * insert extra code to ensure aligned access is respected. If you know
 * your target CPU supports unaligned memory access, you may want to
 * force this option manually to improve performance.
 */
#if defined(__ARM_FEATURE_UNALIGNED)
#define LZ4_FORCE_UNALIGNED_ACCESS 1
#endif

#define LZ4_FORCE_SW_BITCOUNT

#if __STDC_VERSION__ >= 199901L	/* C99 */
/* "restrict" is a known keyword */
#else
/* Disable restrict */
#define restrict
#endif

#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef _MSC_VER
/* Visual is not C99, but supports some kind of inline */
#define inline __forceinline
#if LZ4_ARCH64
/* For Visual 2005 */
#pragma intrinsic(_BitScanForward64)
#pragma intrinsic(_BitScanReverse64)
#else /* !LZ4_ARCH64 */
/* For Visual 2005 */
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#endif /* !LZ4_ARCH64 */
#endif /* _MSC_VER */

#ifdef _MSC_VER
#define lz4_bswap16(x) _byteswap_ushort(x)
#else /* !_MSC_VER */
#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
	(((x) & 0xffu) << 8)))
#endif /* !_MSC_VER */

#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
#define expect(expr, value)    (__builtin_expect((expr), (value)))
#else
#define expect(expr, value)    (expr)
#endif

#define likely(expr)	expect((expr) != 0, 1)
#define unlikely(expr)	expect((expr) != 0, 0)

#if defined(_MSC_VER)
/* Visual Studio does not support 'stdint' natively */
#define BYTE	unsigned __int8
#define U16	unsigned __int16
#define U32	unsigned __int32
#define S32	__int32
#define U64	unsigned __int64
#else /* !defined(_MSC_VER) */
#define BYTE	uint8_t
#define U16	uint16_t
#define U32	uint32_t
#define S32	int32_t
#define U64	uint64_t
#endif /* !defined(_MSC_VER) */

#ifndef LZ4_FORCE_UNALIGNED_ACCESS
#pragma pack(1)
#endif

typedef struct _U16_S {
	U16 v;
} U16_S;
typedef struct _U32_S {
	U32 v;
} U32_S;
typedef struct _U64_S {
	U64 v;
} U64_S;

#ifndef LZ4_FORCE_UNALIGNED_ACCESS
#pragma pack()
#endif

#define A64(x) (((U64_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
#define A16(x) (((U16_S *)(x))->v)

/* Constants */
#define MINMATCH 4

#define HASH_LOG COMPRESSIONLEVEL
#define HASHTABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASHTABLESIZE - 1)

#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION > 2 ? \
	NOTCOMPRESSIBLE_CONFIRMATION : 2)

/*
 * Defines whether memory is allocated on the stack (local variable),
 * or from the heap (kmem_alloc()).
 */
#define HEAPMODE (HASH_LOG > STACKLIMIT)
#define COPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH + MINMATCH)
#define MINLENGTH (MFLIMIT + 1)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS 4
#define ML_MASK ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
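
/*
 * Illustrative note (not part of the original comments): ML_BITS/RUN_BITS
 * describe the layout of each sequence's token byte: the high RUN_BITS (4)
 * bits hold the literal-run length and the low ML_BITS (4) bits hold the
 * match length, with the all-ones value (RUN_MASK/ML_MASK) meaning "length
 * continues in the following byte(s)".
 */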

/*
 * Architecture-specific macros
 */
#if LZ4_ARCH64
#define STEPSIZE 8
#define UARCH U64
#define AARCH A64
#define LZ4_COPYSTEP(s, d)	A64(d) = A64(s); d += 8; s += 8;
#define LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d)
#define LZ4_SECURECOPY(s, d, e)	if (d < e) LZ4_WILDCOPY(s, d, e)
#define HTYPE U32
#define INITBASE(base)		const BYTE* const base = ip
#else /* !LZ4_ARCH64 */
#define STEPSIZE 4
#define UARCH U32
#define AARCH A32
#define LZ4_COPYSTEP(s, d)	A32(d) = A32(s); d += 4; s += 4;
#define LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
#define LZ4_SECURECOPY		LZ4_WILDCOPY
#define HTYPE const BYTE *
#define INITBASE(base)		const int base = 0
#endif /* !LZ4_ARCH64 */

#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
	{ U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
#define LZ4_WRITE_LITTLEENDIAN_16(p, i) \
	{ U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
#else
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
#define LZ4_WRITE_LITTLEENDIAN_16(p, v)  { A16(p) = v; p += 2; }
#endif

/* Local structures */
struct refTables {
	HTYPE hashTable[HASHTABLESIZE];
};

/* Macros */
#define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
	HASH_LOG))
#define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
#define LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
#define LZ4_BLINDCOPY(s, d, l) { BYTE* e = (d) + l; LZ4_WILDCOPY(s, d, e); \
	d = e; }
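
/*
 * Illustrative note (not part of the original comments): LZ4_HASH_FUNCTION()
 * is a multiplicative (Fibonacci) hash: the 4 input bytes are multiplied by
 * 2654435761 (close to 2^32 divided by the golden ratio) and only the top
 * HASH_LOG bits of the product are kept, yielding an index into the
 * HASHTABLESIZE-entry table.
 */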

/* Private functions */
#if LZ4_ARCH64

static inline int
LZ4_NbCommonBytes(register U64 val)
{
#if defined(LZ4_BIG_ENDIAN)
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
	unsigned long r = 0;
	_BitScanReverse64(&r, val);
	return (int)(r >> 3);
#elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_clzll(val) >> 3);
#else
	int r;
	if (!(val >> 32)) {
		r = 4;
	} else {
		r = 0;
		val >>= 32;
	}
	if (!(val >> 16)) {
		r += 2;
		val >>= 8;
	} else {
		val >>= 24;
	}
	r += (!val);
	return (r);
#endif
#else
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
	unsigned long r = 0;
	_BitScanForward64(&r, val);
	return (int)(r >> 3);
#elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_ctzll(val) >> 3);
#else
	static const int DeBruijnBytePos[64] =
	    { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
		3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
		5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
		4, 5, 7, 2, 6, 5, 7, 6, 7, 7
	};
	return DeBruijnBytePos[((U64) ((val & -val) * 0x0218A392CDABBD3F)) >>
	    58];
#endif
#endif
}

#else

static inline int
LZ4_NbCommonBytes(register U32 val)
{
#if defined(LZ4_BIG_ENDIAN)
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
	unsigned long r = 0;
	_BitScanReverse(&r, val);
	return (int)(r >> 3);
#elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_clz(val) >> 3);
#else
	int r;
	if (!(val >> 16)) {
		r = 2;
		val >>= 8;
	} else {
		r = 0;
		val >>= 24;
	}
	r += (!val);
	return (r);
#endif
#else
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
	unsigned long r = 0;
	_BitScanForward(&r, val);
	return (int)(r >> 3);
#elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_ctz(val) >> 3);
#else
	static const int DeBruijnBytePos[32] = {
		0, 0, 3, 0, 3, 1, 3, 0,
		3, 2, 2, 1, 3, 2, 0, 1,
		3, 3, 1, 2, 2, 2, 2, 0,
		3, 1, 2, 0, 1, 0, 1, 1
	};
	return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >>
	    27];
#endif
#endif
}

#endif
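
/*
 * Illustrative note (not part of the original comments): the DeBruijnBytePos
 * tables above implement a branch-free count of trailing zero bytes when the
 * compiler builtins are unavailable: "val & -val" isolates the lowest set
 * bit, multiplying by the de Bruijn constant maps each possible bit position
 * to a distinct value in the top bits, and the final shift turns that into a
 * table index whose entry is the byte offset of the first differing byte.
 */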

/* Public functions */

/* Compression functions */

static int
LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
    int osize)
{
#if HEAPMODE
	struct refTables *srt = (struct refTables *)ctx;
	HTYPE *HashTable = (HTYPE *) (srt->hashTable);
#else
	HTYPE HashTable[HASHTABLESIZE] = { 0 };
#endif

	const BYTE *ip = (BYTE *) source;
	INITBASE(base);
	const BYTE *anchor = ip;
	const BYTE *const iend = ip + isize;
	const BYTE *const oend = (BYTE *) dest + osize;
	const BYTE *const mflimit = iend - MFLIMIT;
#define matchlimit (iend - LASTLITERALS)

	BYTE *op = (BYTE *) dest;

	int len, length;
	const int skipStrength = SKIPSTRENGTH;
	U32 forwardH;

	/* Init */
	if (isize < MINLENGTH)
		goto _last_literals;

	/* First Byte */
	HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
	ip++;
	forwardH = LZ4_HASH_VALUE(ip);

	/* Main Loop */
	for (;;) {
		int findMatchAttempts = (1U << skipStrength) + 3;
		const BYTE *forwardIp = ip;
		const BYTE *ref;
		BYTE *token;

		/* Find a match */
		do {
			U32 h = forwardH;
			int step = findMatchAttempts++ >> skipStrength;
			ip = forwardIp;
			forwardIp = ip + step;

			if unlikely(forwardIp > mflimit) {
				goto _last_literals;
			}

			forwardH = LZ4_HASH_VALUE(forwardIp);
			ref = base + HashTable[h];
			HashTable[h] = ip - base;

		} while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));

		/* Catch up */
		while ((ip > anchor) && (ref > (BYTE *) source) &&
		    unlikely(ip[-1] == ref[-1])) {
			ip--;
			ref--;
		}

		/* Encode Literal length */
		length = ip - anchor;
		token = op++;

		/* Check output limit */
		if unlikely(op + length + (2 + 1 + LASTLITERALS) +
		    (length >> 8) > oend)
			return (0);

		if (length >= (int)RUN_MASK) {
			*token = (RUN_MASK << ML_BITS);
			len = length - RUN_MASK;
			for (; len > 254; len -= 255)
				*op++ = 255;
			*op++ = (BYTE)len;
		} else
			*token = (length << ML_BITS);

		/* Copy Literals */
		LZ4_BLINDCOPY(anchor, op, length);

		_next_match:
		/* Encode Offset */
		LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);

		/* Start Counting */
		ip += MINMATCH;
		ref += MINMATCH;	/* MinMatch verified */
		anchor = ip;
		while likely(ip < matchlimit - (STEPSIZE - 1)) {
			UARCH diff = AARCH(ref) ^ AARCH(ip);
			if (!diff) {
				ip += STEPSIZE;
				ref += STEPSIZE;
				continue;
			}
			ip += LZ4_NbCommonBytes(diff);
			goto _endCount;
		}
#if LZ4_ARCH64
		if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
			ip += 4;
			ref += 4;
		}
#endif
		if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
			ip += 2;
			ref += 2;
		}
		if ((ip < matchlimit) && (*ref == *ip))
			ip++;
		_endCount:

		/* Encode MatchLength */
		len = (ip - anchor);
		/* Check output limit */
		if unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)
			return (0);
		if (len >= (int)ML_MASK) {
			*token += ML_MASK;
			len -= ML_MASK;
			for (; len > 509; len -= 510) {
				*op++ = 255;
				*op++ = 255;
			}
			if (len > 254) {
				len -= 255;
				*op++ = 255;
			}
			*op++ = (BYTE)len;
		} else
			*token += len;

		/* Test end of chunk */
		if (ip > mflimit) {
			anchor = ip;
			break;
		}
		/* Fill table */
		HashTable[LZ4_HASH_VALUE(ip - 2)] = ip - 2 - base;

		/* Test next position */
		ref = base + HashTable[LZ4_HASH_VALUE(ip)];
		HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
		if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
			token = op++;
			*token = 0;
			goto _next_match;
		}

		/* Prepare next loop */
		anchor = ip++;
		forwardH = LZ4_HASH_VALUE(ip);
	}

	_last_literals:
	/* Encode Last Literals */
	{
		int lastRun = iend - anchor;
		if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
		    oend)
			return (0);
		if (lastRun >= (int)RUN_MASK) {
			*op++ = (RUN_MASK << ML_BITS);
			lastRun -= RUN_MASK;
			for (; lastRun > 254; lastRun -= 255) {
				*op++ = 255;
			}
			*op++ = (BYTE)lastRun;
		} else
			*op++ = (lastRun << ML_BITS);
		(void) memcpy(op, anchor, iend - anchor);
		op += iend - anchor;
	}

	/* End */
	return (int)(((char *)op) - dest);
}

/* Note : this function is valid only if isize < LZ4_64KLIMIT */
#define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
#define HASHLOG64K (HASH_LOG + 1)
#define HASH64KTABLESIZE (1U << HASHLOG64K)
#define LZ4_HASH64K_FUNCTION(i)	(((i) * 2654435761U) >> ((MINMATCH*8) - \
	HASHLOG64K))
#define LZ4_HASH64K_VALUE(p)	LZ4_HASH64K_FUNCTION(A32(p))
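
/*
 * Illustrative note (not part of the original comments): because
 * LZ4_compress64kCtx() only accepts inputs below LZ4_64KLIMIT, every
 * "ip - base" offset it stores fits in 16 bits, so its hash table can use
 * U16 entries (and, via HASHLOG64K = HASH_LOG + 1, twice as many of them)
 * within the same refTables footprint used by LZ4_compressCtx().
 */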

static int
LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
    int osize)
{
#if HEAPMODE
	struct refTables *srt = (struct refTables *)ctx;
	U16 *HashTable = (U16 *) (srt->hashTable);
#else
	U16 HashTable[HASH64KTABLESIZE] = { 0 };
#endif

	const BYTE *ip = (BYTE *) source;
	const BYTE *anchor = ip;
	const BYTE *const base = ip;
	const BYTE *const iend = ip + isize;
	const BYTE *const oend = (BYTE *) dest + osize;
	const BYTE *const mflimit = iend - MFLIMIT;
#define matchlimit (iend - LASTLITERALS)

	BYTE *op = (BYTE *) dest;

	int len, length;
	const int skipStrength = SKIPSTRENGTH;
	U32 forwardH;

	/* Init */
	if (isize < MINLENGTH)
		goto _last_literals;

	/* First Byte */
	ip++;
	forwardH = LZ4_HASH64K_VALUE(ip);

	/* Main Loop */
	for (;;) {
		int findMatchAttempts = (1U << skipStrength) + 3;
		const BYTE *forwardIp = ip;
		const BYTE *ref;
		BYTE *token;

		/* Find a match */
		do {
			U32 h = forwardH;
			int step = findMatchAttempts++ >> skipStrength;
			ip = forwardIp;
			forwardIp = ip + step;

			if (forwardIp > mflimit) {
				goto _last_literals;
			}

			forwardH = LZ4_HASH64K_VALUE(forwardIp);
			ref = base + HashTable[h];
			HashTable[h] = ip - base;

		} while (A32(ref) != A32(ip));

		/* Catch up */
		while ((ip > anchor) && (ref > (BYTE *) source) &&
		    (ip[-1] == ref[-1])) {
			ip--;
			ref--;
		}

		/* Encode Literal length */
		length = ip - anchor;
		token = op++;

		/* Check output limit */
		if unlikely(op + length + (2 + 1 + LASTLITERALS) +
		    (length >> 8) > oend)
			return (0);

		if (length >= (int)RUN_MASK) {
			*token = (RUN_MASK << ML_BITS);
			len = length - RUN_MASK;
			for (; len > 254; len -= 255)
				*op++ = 255;
			*op++ = (BYTE)len;
		} else
			*token = (length << ML_BITS);

		/* Copy Literals */
		LZ4_BLINDCOPY(anchor, op, length);

		_next_match:
		/* Encode Offset */
		LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);

		/* Start Counting */
		ip += MINMATCH;
		ref += MINMATCH;	/* MinMatch verified */
		anchor = ip;
		while (ip < matchlimit - (STEPSIZE - 1)) {
			UARCH diff = AARCH(ref) ^ AARCH(ip);
			if (!diff) {
				ip += STEPSIZE;
				ref += STEPSIZE;
				continue;
			}
			ip += LZ4_NbCommonBytes(diff);
			goto _endCount;
		}
#if LZ4_ARCH64
		if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
			ip += 4;
			ref += 4;
		}
#endif
		if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
			ip += 2;
			ref += 2;
		}
		if ((ip < matchlimit) && (*ref == *ip))
			ip++;
		_endCount:

		/* Encode MatchLength */
		len = (ip - anchor);
		/* Check output limit */
		if unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)
			return (0);
		if (len >= (int)ML_MASK) {
			*token += ML_MASK;
			len -= ML_MASK;
			for (; len > 509; len -= 510) {
				*op++ = 255;
				*op++ = 255;
			}
			if (len > 254) {
				len -= 255;
				*op++ = 255;
			}
			*op++ = (BYTE)len;
		} else
			*token += len;

		/* Test end of chunk */
		if (ip > mflimit) {
			anchor = ip;
			break;
		}
		/* Fill table */
		HashTable[LZ4_HASH64K_VALUE(ip - 2)] = ip - 2 - base;

		/* Test next position */
		ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
		HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
		if (A32(ref) == A32(ip)) {
			token = op++;
			*token = 0;
			goto _next_match;
		}

		/* Prepare next loop */
		anchor = ip++;
		forwardH = LZ4_HASH64K_VALUE(ip);
	}

	_last_literals:
	/* Encode Last Literals */
	{
		int lastRun = iend - anchor;
		if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
		    oend)
			return (0);
		if (lastRun >= (int)RUN_MASK) {
			*op++ = (RUN_MASK << ML_BITS);
			lastRun -= RUN_MASK;
			for (; lastRun > 254; lastRun -= 255)
				*op++ = 255;
			*op++ = (BYTE)lastRun;
		} else
			*op++ = (lastRun << ML_BITS);
		(void) memcpy(op, anchor, iend - anchor);
		op += iend - anchor;
	}

	/* End */
	return (int)(((char *)op) - dest);
}

static int
real_LZ4_compress(const char *source, char *dest, int isize, int osize)
{
#if HEAPMODE
	void *ctx = umem_zalloc(sizeof (struct refTables), UMEM_DEFAULT);
	int result;

	/*
	 * out of kernel memory, gently fall through - this will disable
	 * compression in zio_compress_data
	 */
	if (ctx == NULL)
		return (0);

	if (isize < LZ4_64KLIMIT)
		result = LZ4_compress64kCtx(ctx, source, dest, isize, osize);
	else
		result = LZ4_compressCtx(ctx, source, dest, isize, osize);

	umem_free(ctx, sizeof (struct refTables));
	return (result);
#else
	if (isize < (int)LZ4_64KLIMIT)
		return (LZ4_compress64kCtx(NULL, source, dest, isize, osize));
	return (LZ4_compressCtx(NULL, source, dest, isize, osize));
#endif
}

/* Decompression functions */

/*
 * Note: The decoding function LZ4_uncompress_unknownOutputSize() is safe
 *	against "buffer overflow" attack type.
 *	LZ4_uncompress_unknownOutputSize() ensures that it will never read
 *	outside of the input buffer. A corrupted input will produce an error
 *	result, a negative int, indicating the position of the error within
 *	the input stream.
 */

static int
LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
    int maxOutputSize)
{
	/* Local Variables */
	const BYTE *restrict ip = (const BYTE *) source;
	const BYTE *const iend = ip + isize;
	const BYTE *ref;

	BYTE *op = (BYTE *) dest;
	BYTE *const oend = op + maxOutputSize;
	BYTE *cpy;

	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
	size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
#endif

	/* Main Loop */
	while (ip < iend) {
		unsigned token;
		size_t length;

		/* get runlength */
		token = *ip++;
		if ((length = (token >> ML_BITS)) == RUN_MASK) {
			int s = 255;
			while ((ip < iend) && (s == 255)) {
				s = *ip++;
				length += s;
			}
		}
		/* copy literals */
		cpy = op + length;
		/* CORNER-CASE: cpy might overflow. */
		if (cpy < op)
			goto _output_error;	/* cpy was overflowed, bail! */
		if ((cpy > oend - COPYLENGTH) ||
		    (ip + length > iend - COPYLENGTH)) {
			if (cpy > oend)
				/* Error: writes beyond output buffer */
				goto _output_error;
			if (ip + length != iend)
				/*
				 * Error: LZ4 format requires to consume all
				 * input at this stage
				 */
				goto _output_error;
			(void) memcpy(op, ip, length);
			op += length;
			/* Necessarily EOF, due to parsing restrictions */
			break;
		}
		LZ4_WILDCOPY(ip, op, cpy);
		ip -= (op - cpy);
		op = cpy;

		/* get offset */
		LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
		ip += 2;
		if (ref < (BYTE * const) dest)
			/*
			 * Error: offset creates reference outside of
			 * destination buffer
			 */
			goto _output_error;

		/* get matchlength */
		if ((length = (token & ML_MASK)) == ML_MASK) {
			while (ip < iend) {
				int s = *ip++;
				length += s;
				if (s == 255)
					continue;
				break;
			}
		}

		/* copy repeated sequence */
		if unlikely(op - ref < STEPSIZE) {
#if LZ4_ARCH64
			size_t dec64 = dec64table[op-ref];
#else
			const int dec64 = 0;
#endif
			op[0] = ref[0];
			op[1] = ref[1];
			op[2] = ref[2];
			op[3] = ref[3];
			op += 4;
			ref += 4;
			ref -= dec32table[op-ref];
			A32(op) = A32(ref);
			op += STEPSIZE - 4;
			ref -= dec64;
		} else {
			LZ4_COPYSTEP(ref, op);
		}
		cpy = op + length - (STEPSIZE - 4);
		if (cpy > oend - COPYLENGTH) {
			if (cpy > oend)
				/*
				 * Error: request to write outside of
				 * destination buffer
				 */
				goto _output_error;
			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
			while (op < cpy)
				*op++ = *ref++;
			op = cpy;
			/*
			 * Check EOF (should never happen, since
			 * last 5 bytes are supposed to be literals)
			 */
			if (op == oend)
				goto _output_error;
			continue;
		}
		LZ4_SECURECOPY(ref, op, cpy);
		op = cpy;	/* correction */
	}

	/* end of decoding */
	return (int)(((char *)op) - dest);

	/* write overflow error detected */
	_output_error:
	return (int)(-(((char *)ip) - source));
}