2 /*! pako 2.1.0 https://github.com/nodeca/pako @license (MIT AND Zlib) */
3 (function (global, factory) {
4 typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
5 typeof define === 'function' && define.amd ? define(['exports'], factory) :
6 (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.pako = {}));
7 })(this, (function (exports) { 'use strict';
9 // (C) 1995-2013 Jean-loup Gailly and Mark Adler
10 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
12 // This software is provided 'as-is', without any express or implied
13 // warranty. In no event will the authors be held liable for any damages
14 // arising from the use of this software.
16 // Permission is granted to anyone to use this software for any purpose,
17 // including commercial applications, and to alter it and redistribute it
18 // freely, subject to the following restrictions:
20 // 1. The origin of this software must not be misrepresented; you must not
21 // claim that you wrote the original software. If you use this software
22 // in a product, an acknowledgment in the product documentation would be
23 // appreciated but is not required.
24 // 2. Altered source versions must be plainly marked as such, and must not be
25 // misrepresented as being the original software.
26 // 3. This notice may not be removed or altered from any source distribution.
28 /* eslint-disable space-unary-ops */
30 /* Public constants ==========================================================*/
31 /* ===========================================================================*/
//const Z_FILTERED          = 1;
//const Z_HUFFMAN_ONLY      = 2;
//const Z_RLE               = 3;
const Z_FIXED$1 = 4;
//const Z_DEFAULT_STRATEGY  = 0;

/* Possible values of the data_type field (though see inflate()) */
const Z_BINARY = 0;
const Z_TEXT = 1;
//const Z_ASCII = 1; // = Z_TEXT
const Z_UNKNOWN$1 = 2;
46 /*============================================================================*/
/* Reset every element of `buf` to 0, in place. */
function zero$1(buf) {
  for (let i = 0; i < buf.length; i++) {
    buf[i] = 0;
  }
}
const STORED_BLOCK = 0;
const STATIC_TREES = 1;
const DYN_TREES    = 2;
/* The three kinds of block type */

const MIN_MATCH$1 = 3;
const MAX_MATCH$1 = 258;
/* The minimum and maximum match lengths */
63 /* ===========================================================================
64 * Internal compression state.
const LENGTH_CODES$1 = 29;
/* number of length codes, not counting the special END_BLOCK code */

const LITERALS$1 = 256;
/* number of literal bytes 0..255 */

const L_CODES$1 = LITERALS$1 + 1 + LENGTH_CODES$1;
/* number of Literal or Length codes, including the END_BLOCK code */

const D_CODES$1 = 30;
/* number of distance codes */

const BL_CODES$1 = 19;
/* number of codes used to transfer the bit lengths */

const HEAP_SIZE$1 = 2 * L_CODES$1 + 1;
/* maximum heap size */
const MAX_BITS$1 = 15;
/* All codes must not exceed MAX_BITS bits */

const Buf_size = 16;
/* size of bit buffer in bi_buf */
92 /* ===========================================================================
const MAX_BL_BITS = 7;
/* Bit length codes must not exceed MAX_BL_BITS bits */

const END_BLOCK = 256;
/* end of block literal code */

const REP_3_6 = 16;
/* repeat previous bit length 3-6 times (2 bits of repeat count) */

const REPZ_3_10 = 17;
/* repeat a zero length 3-10 times (3 bits of repeat count) */

const REPZ_11_138 = 18;
/* repeat a zero length 11-138 times (7 bits of repeat count) */
/* eslint-disable comma-spacing,array-bracket-spacing */
const extra_lbits = /* extra bits for each length code */
  new Uint8Array([0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0]);

const extra_dbits = /* extra bits for each distance code */
  new Uint8Array([0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13]);

const extra_blbits = /* extra bits for each bit length code */
  new Uint8Array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7]);

const bl_order = /* order in which bit-length code lengths are transmitted */
  new Uint8Array([16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]);
/* eslint-enable comma-spacing,array-bracket-spacing */
125 /* The lengths of the bit length codes are sent in order of decreasing
126 * probability, to avoid transmitting the lengths for unused bit length codes.
129 /* ===========================================================================
130 * Local data. These are initialized only once.
133 // We pre-fill arrays with 0 to avoid uninitialized gaps
const DIST_CODE_LEN = 512; /* see definition of array dist_code below */

// !!!! Use flat array instead of structure, Freq = i*2, Len = i*2+1
const static_ltree = new Array((L_CODES$1 + 2) * 2);
zero$1(static_ltree);
/* The static literal tree. Since the bit lengths are imposed, there is no
 * need for the L_CODES extra codes used during heap construction. However
 * The codes 286 and 287 are needed to build a canonical tree (see _tr_init
 * below).
 */

const static_dtree = new Array(D_CODES$1 * 2);
zero$1(static_dtree);
/* The static distance tree. (Actually a trivial tree since all codes use
 * 5 bits.)
 */

const _dist_code = new Array(DIST_CODE_LEN);
zero$1(_dist_code);
/* Distance codes. The first 256 values correspond to the distances
 * 3 .. 258, the last 256 values correspond to the top 8 bits of
 * the 15 bit distances.
 */

const _length_code = new Array(MAX_MATCH$1 - MIN_MATCH$1 + 1);
zero$1(_length_code);
/* length code for each normalized match length (0 == MIN_MATCH) */

const base_length = new Array(LENGTH_CODES$1);
zero$1(base_length);
/* First normalized length for each code (0 = MIN_MATCH) */

const base_dist = new Array(D_CODES$1);
zero$1(base_dist);
/* First normalized distance for each code (0 = distance of 1) */
/* Descriptor of a static (fixed) Huffman tree: the imposed code lengths
 * plus the metadata needed to compute dynamic-tree costs against it. */
function StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) {

  this.static_tree = static_tree; /* static tree or NULL */
  this.extra_bits = extra_bits;   /* extra bits for each code or NULL */
  this.extra_base = extra_base;   /* base index for extra_bits */
  this.elems = elems;             /* max number of elements in the tree */
  this.max_length = max_length;   /* max bit length for the codes */

  // show if `static_tree` has data or dummy - needed for monomorphic objects
  this.has_stree = static_tree && static_tree.length;
}

/* Filled in by tr_static_init() once the static tables are built. */
let static_l_desc;
let static_d_desc;
let static_bl_desc;
/* Descriptor of a dynamic Huffman tree under construction, paired with the
 * static tree it is compared against. */
function TreeDesc(dyn_tree, stat_desc) {
  this.dyn_tree = dyn_tree;   /* the dynamic tree */
  this.max_code = 0;          /* largest code with non zero frequency */
  this.stat_desc = stat_desc; /* the corresponding static tree */
}
/* Map a (match distance - 1) to its distance code 0..29.
 * Distances below 256 are looked up directly; larger ones by their top
 * 8 bits (second half of _dist_code). */
const d_code = (dist) => {

  return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)];
};
204 /* ===========================================================================
205 * Output a short LSB first on the stream.
206 * IN assertion: there is enough room in pendingBuf.
/* ===========================================================================
 * Output a short LSB first on the stream.
 * IN assertion: there is enough room in pendingBuf.
 */
const put_short = (s, w) => {
//  put_byte(s, (uch)((w) & 0xff));
//  put_byte(s, (uch)((ush)(w) >> 8));
  s.pending_buf[s.pending++] = (w) & 0xff;
  s.pending_buf[s.pending++] = (w >>> 8) & 0xff;
};
216 /* ===========================================================================
217 * Send a value on a given number of bits.
218 * IN assertion: length <= 16 and value fits in length bits.
/* Send `value` on `length` bits (LSB first), buffering through bi_buf.
 * When the 16-bit buffer would overflow, flush the full short and keep
 * the remaining high bits. IN assertion: length <= 16 and value fits. */
const send_bits = (s, value, length) => {

  if (s.bi_valid > (Buf_size - length)) {
    s.bi_buf |= (value << s.bi_valid) & 0xffff;
    put_short(s, s.bi_buf);
    s.bi_buf = value >> (Buf_size - s.bi_valid);
    s.bi_valid += length - Buf_size;
  } else {
    s.bi_buf |= (value << s.bi_valid) & 0xffff;
    s.bi_valid += length;
  }
};
/* Send the Huffman code for symbol `c` from flat tree layout
 * (Code = tree[c*2], Len = tree[c*2+1]). */
const send_code = (s, c, tree) => {

  send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/);
};
240 /* ===========================================================================
241 * Reverse the first len bits of a code, using straightforward code (a faster
242 * method would use a table)
243 * IN assertion: 1 <= len <= 15
/* ===========================================================================
 * Reverse the first len bits of a code, using straightforward code (a faster
 * method would use a table)
 * IN assertion: 1 <= len <= 15
 */
const bi_reverse = (code, len) => {

  let res = 0;
  do {
    res |= code & 1;
    code >>>= 1;
    res <<= 1;
  } while (--len > 0);
  return res >>> 1;
};
257 /* ===========================================================================
258 * Flush the bit buffer, keeping at most 7 bits in it.
/* ===========================================================================
 * Flush the bit buffer, keeping at most 7 bits in it.
 */
const bi_flush = (s) => {

  if (s.bi_valid === 16) {
    put_short(s, s.bi_buf);
    s.bi_buf = 0;
    s.bi_valid = 0;

  } else if (s.bi_valid >= 8) {
    s.pending_buf[s.pending++] = s.bi_buf & 0xff;
    s.bi_buf >>= 8;
    s.bi_valid -= 8;
  }
};
275 /* ===========================================================================
276 * Compute the optimal bit lengths for a tree and update the total bit length
277 * for the current block.
278 * IN assertion: the fields freq and dad are set, heap[heap_max] and
279 * above are the tree nodes sorted by increasing frequency.
280 * OUT assertions: the field len is set to the optimal bit length, the
281 * array bl_count contains the frequencies for each bit length.
282 * The length opt_len is updated; static_len is also updated if stree is
/* Compute the optimal bit lengths for a tree and update s.opt_len /
 * s.static_len. Overlong codes (> max_length) are clipped and the
 * overflow is repaired by moving leaves down the tree. */
const gen_bitlen = (s, desc) => {
//    deflate_state *s;
//    tree_desc *desc;    /* the tree descriptor */

  const tree            = desc.dyn_tree;
  const max_code        = desc.max_code;
  const stree           = desc.stat_desc.static_tree;
  const has_stree       = desc.stat_desc.has_stree;
  const extra           = desc.stat_desc.extra_bits;
  const base            = desc.stat_desc.extra_base;
  const max_length      = desc.stat_desc.max_length;
  let h;              /* heap index */
  let n, m;           /* iterate over the tree elements */
  let bits;           /* bit length */
  let xbits;          /* extra bits */
  let f;              /* frequency */
  let overflow = 0;   /* number of elements with bit length too large */

  for (bits = 0; bits <= MAX_BITS$1; bits++) {
    s.bl_count[bits] = 0;
  }

  /* In a first pass, compute the optimal bit lengths (which may
   * overflow in the case of the bit length tree).
   */
  tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */

  for (h = s.heap_max + 1; h < HEAP_SIZE$1; h++) {
    n = s.heap[h];
    bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1;
    if (bits > max_length) {
      bits = max_length;
      overflow++;
    }
    tree[n * 2 + 1]/*.Len*/ = bits;
    /* We overwrite tree[n].Dad which is no longer needed */

    if (n > max_code) { continue; } /* not a leaf node */

    s.bl_count[bits]++;
    xbits = 0;
    if (n >= base) {
      xbits = extra[n - base];
    }
    f = tree[n * 2]/*.Freq*/;
    s.opt_len += f * (bits + xbits);
    if (has_stree) {
      s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits);
    }
  }
  if (overflow === 0) { return; }

  // Tracev((stderr,"\nbit length overflow\n"));
  /* This happens for example on obj2 and pic of the Calgary corpus */

  /* Find the first bit length which could increase: */
  do {
    bits = max_length - 1;
    while (s.bl_count[bits] === 0) { bits--; }
    s.bl_count[bits]--;      /* move one leaf down the tree */
    s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */
    s.bl_count[max_length]--;
    /* The brother of the overflow item also moves one step up,
     * but this does not affect bl_count[max_length]
     */
    overflow -= 2;
  } while (overflow > 0);

  /* Now recompute all bit lengths, scanning in increasing frequency.
   * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
   * lengths instead of fixing only the wrong ones. This idea is taken
   * from 'ar' written by Haruhiko Okumura.)
   */
  for (bits = max_length; bits !== 0; bits--) {
    n = s.bl_count[bits];
    while (n !== 0) {
      m = s.heap[--h];
      if (m > max_code) { continue; }
      if (tree[m * 2 + 1]/*.Len*/ !== bits) {
        // Tracev((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
        s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/;
        tree[m * 2 + 1]/*.Len*/ = bits;
      }
      n--;
    }
  }
};
374 /* ===========================================================================
375 * Generate the codes for a given tree and bit counts (which need not be
377 * IN assertion: the array bl_count contains the bit length statistics for
378 * the given tree and the field len is set for all tree elements.
379 * OUT assertion: the field code is set for all tree elements of non
/* Generate canonical Huffman codes for a tree whose `len` fields are set.
 * Codes are assigned in symbol order within each bit length, then
 * bit-reversed (DEFLATE transmits codes LSB first). */
const gen_codes = (tree, max_code, bl_count) => {
//    ct_data *tree;             /* the tree to decorate */
//    int max_code;              /* largest code with non zero frequency */
//    ushf *bl_count;            /* number of codes at each bit length */

  const next_code = new Array(MAX_BITS$1 + 1); /* next code value for each bit length */
  let code = 0;              /* running code value */
  let bits;                  /* bit index */
  let n;                     /* code index */

  /* The distribution counts are first used to generate the code values
   * without bit reversal.
   */
  for (bits = 1; bits <= MAX_BITS$1; bits++) {
    code = (code + bl_count[bits - 1]) << 1;
    next_code[bits] = code;
  }
  /* Check that the bit counts in bl_count are consistent. The last code
   * must be all ones.
   */
  //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
  //        "inconsistent bit counts");
  //Tracev((stderr,"\ngen_codes: max_code %d ", max_code));

  for (n = 0; n <= max_code; n++) {
    let len = tree[n * 2 + 1]/*.Len*/;
    if (len === 0) { continue; }
    /* Now reverse the bits */
    tree[n * 2]/*.Code*/ = bi_reverse(next_code[len]++, len);

    //Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
    //     n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
  }
};
418 /* ===========================================================================
419 * Initialize the various 'constant' tables.
/* ===========================================================================
 * Initialize the various 'constant' tables: length/distance code mappings
 * and the static literal and distance trees. Run once, lazily, from _tr_init.
 */
const tr_static_init = () => {

  let n;        /* iterates over tree elements */
  let bits;     /* bit counter */
  let length;   /* length value */
  let code;     /* code value */
  let dist;     /* distance index */
  const bl_count = new Array(MAX_BITS$1 + 1);
  /* number of codes at each bit length for an optimal tree */

  // do check in _tr_init()
  //if (static_init_done) return;

  /* For some embedded targets, global variables are not initialized: */
/*#ifdef NO_INIT_GLOBAL_POINTERS
  static_l_desc.static_tree = static_ltree;
  static_l_desc.extra_bits = extra_lbits;
  static_d_desc.static_tree = static_dtree;
  static_d_desc.extra_bits = extra_dbits;
  static_bl_desc.extra_bits = extra_blbits;
#endif*/

  /* Initialize the mapping length (0..255) -> length code (0..28) */
  length = 0;
  for (code = 0; code < LENGTH_CODES$1 - 1; code++) {
    base_length[code] = length;
    for (n = 0; n < (1 << extra_lbits[code]); n++) {
      _length_code[length++] = code;
    }
  }
  //Assert (length == 256, "tr_static_init: length != 256");
  /* Note that the length 255 (match length 258) can be represented
   * in two different ways: code 284 + 5 bits or code 285, so we
   * overwrite length_code[255] to use the best encoding:
   */
  _length_code[length - 1] = code;

  /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
  dist = 0;
  for (code = 0; code < 16; code++) {
    base_dist[code] = dist;
    for (n = 0; n < (1 << extra_dbits[code]); n++) {
      _dist_code[dist++] = code;
    }
  }
  //Assert (dist == 256, "tr_static_init: dist != 256");
  dist >>= 7; /* from now on, all distances are divided by 128 */
  for (; code < D_CODES$1; code++) {
    base_dist[code] = dist << 7;
    for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) {
      _dist_code[256 + dist++] = code;
    }
  }
  //Assert (dist == 256, "tr_static_init: 256+dist != 512");

  /* Construct the codes of the static literal tree */
  for (bits = 0; bits <= MAX_BITS$1; bits++) {
    bl_count[bits] = 0;
  }

  /* Static literal tree lengths per the DEFLATE spec (RFC 1951, 3.2.6):
   * 0..143 -> 8 bits, 144..255 -> 9, 256..279 -> 7, 280..287 -> 8. */
  n = 0;
  while (n <= 143) {
    static_ltree[n * 2 + 1]/*.Len*/ = 8;
    n++;
    bl_count[8]++;
  }
  while (n <= 255) {
    static_ltree[n * 2 + 1]/*.Len*/ = 9;
    n++;
    bl_count[9]++;
  }
  while (n <= 279) {
    static_ltree[n * 2 + 1]/*.Len*/ = 7;
    n++;
    bl_count[7]++;
  }
  while (n <= 287) {
    static_ltree[n * 2 + 1]/*.Len*/ = 8;
    n++;
    bl_count[8]++;
  }
  /* Codes 286 and 287 do not exist, but we must include them in the
   * tree construction to get a canonical Huffman tree (longest code
   * all ones)
   */
  gen_codes(static_ltree, L_CODES$1 + 1, bl_count);

  /* The static distance tree is trivial: */
  for (n = 0; n < D_CODES$1; n++) {
    static_dtree[n * 2 + 1]/*.Len*/ = 5;
    static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5);
  }

  // Now data ready and we can init static trees
  static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS$1 + 1, L_CODES$1, MAX_BITS$1);
  static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES$1, MAX_BITS$1);
  static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES$1, MAX_BL_BITS);

  //static_init_done = true;
};
523 /* ===========================================================================
524 * Initialize a new block.
/* ===========================================================================
 * Initialize a new block: clear all symbol frequencies and counters.
 */
const init_block = (s) => {

  let n; /* iterates over tree elements */

  /* Initialize the trees. */
  for (n = 0; n < L_CODES$1; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; }
  for (n = 0; n < D_CODES$1; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; }
  for (n = 0; n < BL_CODES$1; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; }

  s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1;
  s.opt_len = s.static_len = 0;
  s.sym_next = s.matches = 0;
};
541 /* ===========================================================================
542 * Flush the bit buffer and align the output on a byte boundary
/* ===========================================================================
 * Flush the bit buffer and align the output on a byte boundary
 */
const bi_windup = (s) => {
  if (s.bi_valid > 8) {
    put_short(s, s.bi_buf);
  } else if (s.bi_valid > 0) {
    //put_byte(s, (Byte)s->bi_buf);
    s.pending_buf[s.pending++] = s.bi_buf;
  }
  s.bi_buf = 0;
  s.bi_valid = 0;
};
556 /* ===========================================================================
557 * Compares to subtrees, using the tree depth as tie breaker when
558 * the subtrees have equal frequency. This minimizes the worst case length.
/* ===========================================================================
 * Compares two subtrees, using the tree depth as tie breaker when
 * the subtrees have equal frequency. This minimizes the worst case length.
 */
const smaller = (tree, n, m, depth) => {

  const _n2 = n * 2;
  const _m2 = m * 2;
  return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ ||
         (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m]));
};
568 /* ===========================================================================
569 * Restore the heap property by moving down the tree starting at node k,
570 * exchanging a node with the smallest of its two sons if necessary, stopping
571 * when the heap property is re-established (each father smaller than its
/* Restore the heap property by sifting node k down, exchanging it with the
 * smaller of its two sons until each father is smaller than its sons. */
const pqdownheap = (s, tree, k) => {
//    deflate_state *s;
//    ct_data *tree;  /* the tree to restore */
//    int k;               /* node to move down */

  const v = s.heap[k];
  let j = k << 1;  /* left son of k */
  while (j <= s.heap_len) {
    /* Set j to the smallest of the two sons: */
    if (j < s.heap_len &&
      smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) {
      j++;
    }
    /* Exit if v is smaller than both sons */
    if (smaller(tree, v, s.heap[j], s.depth)) { break; }

    /* Exchange v with the smallest son */
    s.heap[k] = s.heap[j];
    k = j;

    /* And continue down the tree, setting j to the left son of k */
    j <<= 1;
  }
  s.heap[k] = v;
};
602 // const SMALLEST = 1;
604 /* ===========================================================================
605 * Send the block data compressed using the given Huffman trees
/* ===========================================================================
 * Send the block data compressed using the given Huffman trees.
 * Symbols are read back from sym_buf as (dist_lo, dist_hi, lc) triples;
 * dist == 0 means lc is a literal, otherwise a match of length lc+MIN_MATCH.
 */
const compress_block = (s, ltree, dtree) => {
//    deflate_state *s;
//    const ct_data *ltree; /* literal tree */
//    const ct_data *dtree; /* distance tree */

  let dist;           /* distance of matched string */
  let lc;             /* match length or unmatched char (if dist == 0) */
  let sx = 0;         /* running index in sym_buf */
  let code;           /* the code to send */
  let extra;          /* number of extra bits to send */

  if (s.sym_next !== 0) {
    do {
      dist = s.pending_buf[s.sym_buf + sx++] & 0xff;
      dist += (s.pending_buf[s.sym_buf + sx++] & 0xff) << 8;
      lc = s.pending_buf[s.sym_buf + sx++];
      if (dist === 0) {
        send_code(s, lc, ltree); /* send a literal byte */
        //Tracecv(isgraph(lc), (stderr," '%c' ", lc));
      } else {
        /* Here, lc is the match length - MIN_MATCH */
        code = _length_code[lc];
        send_code(s, code + LITERALS$1 + 1, ltree); /* send the length code */
        extra = extra_lbits[code];
        if (extra !== 0) {
          lc -= base_length[code];
          send_bits(s, lc, extra);       /* send the extra length bits */
        }
        dist--; /* dist is now the match distance - 1 */
        code = d_code(dist);
        //Assert (code < D_CODES, "bad d_code");

        send_code(s, code, dtree);       /* send the distance code */
        extra = extra_dbits[code];
        if (extra !== 0) {
          dist -= base_dist[code];
          send_bits(s, dist, extra);   /* send the extra distance bits */
        }
      } /* literal or match pair ? */

      /* Check that the overlay between pending_buf and sym_buf is ok: */
      //Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow");

    } while (sx < s.sym_next);
  }

  send_code(s, END_BLOCK, ltree);
};
657 /* ===========================================================================
658 * Construct one Huffman tree and assigns the code bit strings and lengths.
659 * Update the total bit length for the current block.
660 * IN assertion: the field freq is set for all tree elements.
661 * OUT assertions: the fields len and code are set to the optimal bit length
662 * and corresponding code. The length opt_len is updated; static_len is
663 * also updated if stree is not null. The field max_code is set.
/* Construct one Huffman tree: build the frequency heap, repeatedly merge
 * the two least-frequent nodes, then assign bit lengths (gen_bitlen) and
 * codes (gen_codes). Sets desc.max_code and updates opt_len/static_len. */
const build_tree = (s, desc) => {
//    deflate_state *s;
//    tree_desc *desc; /* the tree descriptor */

  const tree     = desc.dyn_tree;
  const stree    = desc.stat_desc.static_tree;
  const has_stree = desc.stat_desc.has_stree;
  const elems    = desc.stat_desc.elems;
  let n, m;          /* iterate over heap elements */
  let max_code = -1; /* largest code with non zero frequency */
  let node;          /* new node being created */

  /* Construct the initial heap, with least frequent element in
   * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
   * heap[0] is not used.
   */
  s.heap_len = 0;
  s.heap_max = HEAP_SIZE$1;

  for (n = 0; n < elems; n++) {
    if (tree[n * 2]/*.Freq*/ !== 0) {
      s.heap[++s.heap_len] = max_code = n;
      s.depth[n] = 0;

    } else {
      tree[n * 2 + 1]/*.Len*/ = 0;
    }
  }

  /* The pkzip format requires that at least one distance code exists,
   * and that at least one bit should be sent even if there is only one
   * possible code. So to avoid special checks later on we force at least
   * two codes of non zero frequency.
   */
  while (s.heap_len < 2) {
    node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);
    tree[node * 2]/*.Freq*/ = 1;
    s.depth[node] = 0;
    s.opt_len--;

    if (has_stree) {
      s.static_len -= stree[node * 2 + 1]/*.Len*/;
    }
    /* node is 0 or 1 so it does not have extra bits */
  }
  desc.max_code = max_code;

  /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
   * establish sub-heaps of increasing lengths:
   */
  for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); }

  /* Construct the Huffman tree by repeatedly combining the least two
   * frequent nodes.
   */
  node = elems;              /* next internal node of the tree */
  do {
    //pqremove(s, tree, n);  /* n = node of least frequency */
    /*** pqremove ***/
    n = s.heap[1/*SMALLEST*/];
    s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--];
    pqdownheap(s, tree, 1/*SMALLEST*/);
    /***/

    m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */

    s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */
    s.heap[--s.heap_max] = m;

    /* Create a new node father of n and m */
    tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/;
    s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1;
    tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node;

    /* and insert the new node in the heap */
    s.heap[1/*SMALLEST*/] = node++;
    pqdownheap(s, tree, 1/*SMALLEST*/);

  } while (s.heap_len >= 2);

  s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/];

  /* At this point, the fields freq and dad are set. We can now
   * generate the bit lengths.
   */
  gen_bitlen(s, desc);

  /* The field len is now set, we can generate the bit codes */
  gen_codes(tree, max_code, s.bl_count);
};
757 /* ===========================================================================
758 * Scan a literal or distance tree to determine the frequencies of the codes
759 * in the bit length tree.
/* Scan a literal or distance tree and accumulate, in s.bl_tree, the
 * frequencies of the run-length codes (REP_3_6 / REPZ_3_10 / REPZ_11_138)
 * that will encode its bit lengths. */
const scan_tree = (s, tree, max_code) => {
//    deflate_state *s;
//    ct_data *tree;   /* the tree to be scanned */
//    int max_code;    /* and its largest code of non zero frequency */

  let n;                     /* iterates over all tree elements */
  let prevlen = -1;          /* last emitted length */
  let curlen;                /* length of current code */

  let nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */

  let count = 0;             /* repeat count of the current code */
  let max_count = 7;         /* max repeat count */
  let min_count = 4;         /* min repeat count */

  if (nextlen === 0) {
    max_count = 138;
    min_count = 3;
  }
  tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */

  for (n = 0; n <= max_code; n++) {
    curlen = nextlen;
    nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;

    if (++count < max_count && curlen === nextlen) {
      continue;

    } else if (count < min_count) {
      s.bl_tree[curlen * 2]/*.Freq*/ += count;

    } else if (curlen !== 0) {

      if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; }
      s.bl_tree[REP_3_6 * 2]/*.Freq*/++;

    } else if (count <= 10) {
      s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++;

    } else {
      s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++;
    }

    count = 0;
    prevlen = curlen;

    if (nextlen === 0) {
      max_count = 138;
      min_count = 3;

    } else if (curlen === nextlen) {
      max_count = 6;
      min_count = 3;

    } else {
      max_count = 7;
      min_count = 4;
    }
  }
};
823 /* ===========================================================================
824 * Send a literal or distance tree in compressed form, using the codes in
/* Send a literal or distance tree in compressed form, emitting the
 * run-length codes counted earlier by scan_tree. Mirrors scan_tree's
 * state machine exactly. */
const send_tree = (s, tree, max_code) => {
//    deflate_state *s;
//    ct_data *tree; /* the tree to be scanned */
//    int max_code;       /* and its largest code of non zero frequency */

  let n;                     /* iterates over all tree elements */
  let prevlen = -1;          /* last emitted length */
  let curlen;                /* length of current code */

  let nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */

  let count = 0;             /* repeat count of the current code */
  let max_count = 7;         /* max repeat count */
  let min_count = 4;         /* min repeat count */

  /* tree[max_code+1].Len = -1; */  /* guard already set */
  if (nextlen === 0) {
    max_count = 138;
    min_count = 3;
  }

  for (n = 0; n <= max_code; n++) {
    curlen = nextlen;
    nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;

    if (++count < max_count && curlen === nextlen) {
      continue;

    } else if (count < min_count) {
      do { send_code(s, curlen, s.bl_tree); } while (--count !== 0);

    } else if (curlen !== 0) {
      if (curlen !== prevlen) {
        send_code(s, curlen, s.bl_tree);
        count--;
      }
      //Assert(count >= 3 && count <= 6, " 3_6?");
      send_code(s, REP_3_6, s.bl_tree);
      send_bits(s, count - 3, 2);

    } else if (count <= 10) {
      send_code(s, REPZ_3_10, s.bl_tree);
      send_bits(s, count - 3, 3);

    } else {
      send_code(s, REPZ_11_138, s.bl_tree);
      send_bits(s, count - 11, 7);
    }

    count = 0;
    prevlen = curlen;
    if (nextlen === 0) {
      max_count = 138;
      min_count = 3;

    } else if (curlen === nextlen) {
      max_count = 6;
      min_count = 3;

    } else {
      max_count = 7;
      min_count = 4;
    }
  }
};
894 /* ===========================================================================
895 * Construct the Huffman tree for the bit lengths and return the index in
896 * bl_order of the last bit length code to send.
/* Construct the Huffman tree for the bit lengths and return the index in
 * bl_order of the last bit length code to send. Also updates s.opt_len
 * with the cost of transmitting the tree representations. */
const build_bl_tree = (s) => {

  let max_blindex;  /* index of last bit length code of non zero freq */

  /* Determine the bit length frequencies for literal and distance trees */
  scan_tree(s, s.dyn_ltree, s.l_desc.max_code);
  scan_tree(s, s.dyn_dtree, s.d_desc.max_code);

  /* Build the bit length tree: */
  build_tree(s, s.bl_desc);
  /* opt_len now includes the length of the tree representations, except
   * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
   */

  /* Determine the number of bit length codes to send. The pkzip format
   * requires that at least 4 bit length codes be sent. (appnote.txt says
   * 3 but the actual value used is 4.)
   */
  for (max_blindex = BL_CODES$1 - 1; max_blindex >= 3; max_blindex--) {
    if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) {
      break;
    }
  }
  /* Update opt_len to include the bit length tree and counts */
  s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
  //Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
  //        s->opt_len, s->static_len));

  return max_blindex;
};
930 /* ===========================================================================
931 * Send the header for a block using dynamic Huffman trees: the counts, the
932 * lengths of the bit length codes, the literal tree and the distance tree.
933 * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
/* Send the header for a block using dynamic Huffman trees: the counts,
 * the lengths of the bit length codes, the literal tree and the distance
 * tree. IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. */
const send_all_trees = (s, lcodes, dcodes, blcodes) => {
//    deflate_state *s;
//    int lcodes, dcodes, blcodes; /* number of codes for each tree */

  let rank;                    /* index in bl_order */

  //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
  //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
  //        "too many codes");
  //Tracev((stderr, "\nbl counts: "));
  send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */
  send_bits(s, dcodes - 1,   5);
  send_bits(s, blcodes - 4,  4); /* not -3 as stated in appnote.txt */
  for (rank = 0; rank < blcodes; rank++) {
    //Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
    send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3);
  }
  //Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));

  send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */
  //Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));

  send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */
  //Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
};
962 /* ===========================================================================
963 * Check if the data type is TEXT or BINARY, using the following algorithm:
964 * - TEXT if the two conditions below are satisfied:
965 * a) There are no non-portable control characters belonging to the
966 * "block list" (0..6, 14..25, 28..31).
967 * b) There is at least one printable character belonging to the
968 * "allow list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).
969 * - BINARY otherwise.
970 * - The following partially-portable control characters form a
971 * "gray list" that is ignored in this detection algorithm:
972 * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).
973 * IN assertion: the fields Freq of dyn_ltree are set.
/* Classify the current block as TEXT or BINARY from the dyn_ltree byte
 * frequencies: BINARY if any block-listed control byte occurs, TEXT if any
 * allow-listed byte occurs, otherwise BINARY (empty / gray-listed only). */
const detect_data_type = (s) => {
  /* block_mask is the bit mask of block-listed bytes
   * set bits 0..6, 14..25, and 28..31
   * 0xf3ffc07f = binary 11110011111111111100000001111111
   */
  let block_mask = 0xf3ffc07f;
  let n;

  /* Check for non-textual ("block-listed") bytes. */
  for (n = 0; n <= 31; n++, block_mask >>>= 1) {
    if ((block_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) {
      return Z_BINARY;
    }
  }

  /* Check for textual ("allow-listed") bytes. */
  if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 ||
      s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) {
    return Z_TEXT;
  }
  for (n = 32; n < LITERALS$1; n++) {
    if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) {
      return Z_TEXT;
    }
  }

  /* There are no "block-listed" or "allow-listed" bytes:
   * this stream either is empty or has tolerated ("gray-listed") bytes only.
   */
  return Z_BINARY;
};
let static_init_done = false;

/* ===========================================================================
 * Initialize the tree data structures for a new zlib stream.
 * Builds the static tables once, then resets the per-stream descriptors,
 * the bit buffer, and the first block's counters.
 */
const _tr_init$1 = (s) => {

  if (!static_init_done) {
    tr_static_init();
    static_init_done = true;
  }

  s.l_desc  = new TreeDesc(s.dyn_ltree, static_l_desc);
  s.d_desc  = new TreeDesc(s.dyn_dtree, static_d_desc);
  s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc);

  s.bi_buf = 0;
  s.bi_valid = 0;

  /* Initialize the first block of the first file: */
  init_block(s);
};
1033 /* ===========================================================================
1034 * Send a stored block
/* ===========================================================================
 * Send a stored block: 3-bit header, byte alignment, LEN and its one's
 * complement NLEN, then the raw bytes copied from the window.
 */
const _tr_stored_block$1 = (s, buf, stored_len, last) => {
//DeflateState *s;
//charf *buf;       /* input block */
//ulg stored_len;   /* length of input block */
//int last;         /* one if this is the last block for a file */

  send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3);    /* send block type */
  bi_windup(s);        /* align on byte boundary */
  put_short(s, stored_len);
  put_short(s, ~stored_len);
  if (stored_len) {
    s.pending_buf.set(s.window.subarray(buf, buf + stored_len), s.pending);
  }
  s.pending += stored_len;
};
1053 /* ===========================================================================
1054 * Send one empty static block to give enough lookahead for inflate.
1055 * This takes 10 bits, of which 7 may remain in the bit buffer.
/* ===========================================================================
 * Send one empty static block to give enough lookahead for inflate.
 * This takes 10 bits, of which 7 may remain in the bit buffer.
 */
const _tr_align$1 = (s) => {
  send_bits(s, STATIC_TREES << 1, 3);
  send_code(s, END_BLOCK, static_ltree);
  bi_flush(s);
};
/* ===========================================================================
 * Determine the best encoding for the current block: dynamic trees, static
 * trees or store, and write out the encoded block.
 */
const _tr_flush_block$1 = (s, buf, stored_len, last) => {
//charf *buf; /* input block, or NULL if too old */
//ulg stored_len; /* length of input block */
//int last; /* one if this is the last block for a file */
let opt_lenb, static_lenb; /* opt_len and static_len in bytes */
let max_blindex = 0; /* index of last bit length code of non zero freq */
/* Build the Huffman trees unless a stored block is forced */
/* Check if the file is binary or text */
if (s.strm.data_type === Z_UNKNOWN$1) {
s.strm.data_type = detect_data_type(s);
/* Construct the literal and distance trees */
build_tree(s, s.l_desc);
// Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
build_tree(s, s.d_desc);
// Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
/* At this point, opt_len and static_len are the total bit lengths of
 * the compressed block data, excluding the tree representations.
 */
/* Build the bit length tree for the above two trees, and get the index
 * in bl_order of the last bit length code to send.
 */
max_blindex = build_bl_tree(s);
/* Determine the best encoding. Compute the block lengths in bytes. */
/* +3 for the block-header bits, +7 to round up to whole bytes. */
opt_lenb = (s.opt_len + 3 + 7) >>> 3;
static_lenb = (s.static_len + 3 + 7) >>> 3;
// Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
// opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
// s->sym_next / 3));
/* On a tie prefer the static trees: same size, no tree data to send. */
if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }
// Assert(buf != (char*)0, "lost buf");
opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
/* buf === -1 marks "data too old" (see flush_block_only's block_start test). */
if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {
/* 4: two words for the lengths */
/* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
 * Otherwise we can't have processed more than WSIZE input bytes since
 * the last block flush, because compression would have been
 * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
 * transform a block into a stored block.
 */
_tr_stored_block$1(s, buf, stored_len, last);
} else if (s.strategy === Z_FIXED$1 || static_lenb === opt_lenb) {
send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);
compress_block(s, static_ltree, static_dtree);
/* Dynamic-trees branch: send the tree description, then the block body. */
send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);
send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1);
compress_block(s, s.dyn_ltree, s.dyn_dtree);
// Assert (s->compressed_len == s->bits_sent, "bad compressed size");
/* The above check is made mod 2^32, for files larger than 512 MB
 * and uLong implemented on 32 bits.
 */
// Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
// s->compressed_len-7*last));
/* ===========================================================================
 * Save the match info and tally the frequency counts. Return true if
 * the current block must be flushed.
 */
const _tr_tally$1 = (s, dist, lc) => {
// deflate_state *s;
// unsigned dist; /* distance of matched string */
// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
/* Append the (dist, lc) pair to the symbol buffer as three bytes. */
s.pending_buf[s.sym_buf + s.sym_next++] = dist;
s.pending_buf[s.sym_buf + s.sym_next++] = dist >> 8;
s.pending_buf[s.sym_buf + s.sym_next++] = lc;
/* lc is the unmatched char */
s.dyn_ltree[lc * 2]/*.Freq*/++;
/* Here, lc is the match length - MIN_MATCH */
dist--; /* dist = match distance - 1 */
//Assert((ush)dist < (ush)MAX_DIST(s) &&
// (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
// (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
/* Bump the frequency counters later used to build the dynamic Huffman trees. */
s.dyn_ltree[(_length_code[lc] + LITERALS$1 + 1) * 2]/*.Freq*/++;
s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++;
/* True when the symbol buffer is full and the block must be flushed. */
return (s.sym_next === s.sym_end);
// Module-local aliases for the tree API, wired into the exported object below.
var _tr_init_1 = _tr_init$1;
var _tr_stored_block_1 = _tr_stored_block$1;
var _tr_flush_block_1 = _tr_flush_block$1;
var _tr_tally_1 = _tr_tally$1;
var _tr_align_1 = _tr_align$1;
// Interior of the "trees" export object; its opening line (presumably
// `var trees = {`) is outside this view — confirm upstream.
_tr_init: _tr_init_1,
_tr_stored_block: _tr_stored_block_1,
_tr_flush_block: _tr_flush_block_1,
_tr_tally: _tr_tally_1,
_tr_align: _tr_align_1
1195 // Note: adler32 takes 12% for level 0 and 2% for level 6.
1196 // It isn't worth it to make additional optimizations as in original.
1197 // Small size is preferable.
1199 // (C) 1995-2013 Jean-loup Gailly and Mark Adler
1200 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
1202 // This software is provided 'as-is', without any express or implied
1203 // warranty. In no event will the authors be held liable for any damages
1204 // arising from the use of this software.
1206 // Permission is granted to anyone to use this software for any purpose,
1207 // including commercial applications, and to alter it and redistribute it
1208 // freely, subject to the following restrictions:
1210 // 1. The origin of this software must not be misrepresented; you must not
1211 // claim that you wrote the original software. If you use this software
1212 // in a product, an acknowledgment in the product documentation would be
1213 // appreciated but is not required.
1214 // 2. Altered source versions must be plainly marked as such, and must not be
1215 // misrepresented as being the original software.
1216 // 3. This notice may not be removed or altered from any source distribution.
// Rolling Adler-32 checksum over buf[pos .. pos+len-1], continuing from the
// previous `adler` value. s1 accumulates the byte sum, s2 the sum of the
// running s1 values; both are kept in signed 32-bit ints via `|0`.
// (Interior loop / modulo lines are outside this view.)
const adler32 = (adler, buf, len, pos) => {
let s1 = (adler & 0xffff) |0,
s2 = ((adler >>> 16) & 0xffff) |0,
// Set limit ~ twice less than 5552, to keep
// s2 in 31-bits, because we force signed ints.
// in other case %= will fail.
n = len > 2000 ? 2000 : len;
s1 = (s1 + buf[pos++]) |0;
// Recombine the two 16-bit halves into the 32-bit checksum.
return (s1 | (s2 << 16)) |0;
// Alias for the module's CommonJS-style export wiring.
var adler32_1 = adler32;
1245 // Note: we can't get significant speed boost here.
1246 // So write code to minimize size - no pregenerated tables
1247 // and array tools dependencies.
1249 // (C) 1995-2013 Jean-loup Gailly and Mark Adler
1250 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
1252 // This software is provided 'as-is', without any express or implied
1253 // warranty. In no event will the authors be held liable for any damages
1254 // arising from the use of this software.
1256 // Permission is granted to anyone to use this software for any purpose,
1257 // including commercial applications, and to alter it and redistribute it
1258 // freely, subject to the following restrictions:
1260 // 1. The origin of this software must not be misrepresented; you must not
1261 // claim that you wrote the original software. If you use this software
1262 // in a product, an acknowledgment in the product documentation would be
1263 // appreciated but is not required.
1264 // 2. Altered source versions must be plainly marked as such, and must not be
1265 // misrepresented as being the original software.
1266 // 3. This notice may not be removed or altered from any source distribution.
// Use ordinary array, since untyped makes no boost here
// Builds the 256-entry CRC-32 lookup table for polynomial 0xEDB88320.
const makeTable = () => {
for (var n = 0; n < 256; n++) {
for (var k = 0; k < 8; k++) {
// One polynomial-division step per bit of the table index.
c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1));
// Create table on load. Just 255 signed longs. Not a problem.
const crcTable = new Uint32Array(makeTable());
// Table-driven CRC-32 over buf[pos .. pos+len-1], continuing from `crc`.
const crc32 = (crc, buf, len, pos) => {
const end = pos + len;
for (let i = pos; i < end; i++) {
crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF];
// Final bit inversion; result intentionally left signed (see note).
return (crc ^ (-1)); // >>> 0;
// Alias for the module's CommonJS-style export wiring.
var crc32_1 = crc32;
1303 // (C) 1995-2013 Jean-loup Gailly and Mark Adler
1304 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
1306 // This software is provided 'as-is', without any express or implied
1307 // warranty. In no event will the authors be held liable for any damages
1308 // arising from the use of this software.
1310 // Permission is granted to anyone to use this software for any purpose,
1311 // including commercial applications, and to alter it and redistribute it
1312 // freely, subject to the following restrictions:
1314 // 1. The origin of this software must not be misrepresented; you must not
1315 // claim that you wrote the original software. If you use this software
1316 // in a product, an acknowledgment in the product documentation would be
1317 // appreciated but is not required.
1318 // 2. Altered source versions must be plainly marked as such, and must not be
1319 // misrepresented as being the original software.
1320 // 3. This notice may not be removed or altered from any source distribution.
// zlib status-code → human-readable message map. These lines are the
// interior of the `messages` object; its opening line is outside this view.
2: 'need dictionary', /* Z_NEED_DICT 2 */
1: 'stream end', /* Z_STREAM_END 1 */
'-1': 'file error', /* Z_ERRNO (-1) */
'-2': 'stream error', /* Z_STREAM_ERROR (-2) */
'-3': 'data error', /* Z_DATA_ERROR (-3) */
'-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */
'-5': 'buffer error', /* Z_BUF_ERROR (-5) */
'-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */
1334 // (C) 1995-2013 Jean-loup Gailly and Mark Adler
1335 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
1337 // This software is provided 'as-is', without any express or implied
1338 // warranty. In no event will the authors be held liable for any damages
1339 // arising from the use of this software.
1341 // Permission is granted to anyone to use this software for any purpose,
1342 // including commercial applications, and to alter it and redistribute it
1343 // freely, subject to the following restrictions:
1345 // 1. The origin of this software must not be misrepresented; you must not
1346 // claim that you wrote the original software. If you use this software
1347 // in a product, an acknowledgment in the product documentation would be
1348 // appreciated but is not required.
1349 // 2. Altered source versions must be plainly marked as such, and must not be
1350 // misrepresented as being the original software.
1351 // 3. This notice may not be removed or altered from any source distribution.
// Interior of the shared zlib constants object (opening line outside this view).
/* Allowed flush values; see deflate() and inflate() below for details */
/* Return codes for the compression/decompression functions. Negative values
 * are errors, positive values are used for special but normal events.
 */
//Z_VERSION_ERROR: -6,
/* compression levels */
Z_NO_COMPRESSION: 0,
Z_BEST_COMPRESSION: 9,
Z_DEFAULT_COMPRESSION: -1,
Z_DEFAULT_STRATEGY: 0,
/* Possible values of the data_type field (though see inflate()) */
//Z_ASCII: 1, // = Z_TEXT (deprecated)
/* The deflate compression method */
//Z_NULL: null // Use -1 or null inline, depending on var type
1401 // (C) 1995-2013 Jean-loup Gailly and Mark Adler
1402 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
1404 // This software is provided 'as-is', without any express or implied
1405 // warranty. In no event will the authors be held liable for any damages
1406 // arising from the use of this software.
1408 // Permission is granted to anyone to use this software for any purpose,
1409 // including commercial applications, and to alter it and redistribute it
1410 // freely, subject to the following restrictions:
1412 // 1. The origin of this software must not be misrepresented; you must not
1413 // claim that you wrote the original software. If you use this software
1414 // in a product, an acknowledgment in the product documentation would be
1415 // appreciated but is not required.
1416 // 2. Altered source versions must be plainly marked as such, and must not be
1417 // misrepresented as being the original software.
1418 // 3. This notice may not be removed or altered from any source distribution.
// Pull the tree-emission primitives out of the trees module.
const { _tr_init, _tr_stored_block, _tr_flush_block, _tr_tally, _tr_align } = trees;
/* Public constants ==========================================================*/
/* ===========================================================================*/
// Interior of a destructuring of the shared constants object (presumably
// `const { ... } = constants$2;` — opening/closing lines outside this view).
Z_NO_FLUSH: Z_NO_FLUSH$1, Z_PARTIAL_FLUSH, Z_FULL_FLUSH: Z_FULL_FLUSH$1, Z_FINISH: Z_FINISH$1, Z_BLOCK,
Z_OK: Z_OK$1, Z_STREAM_END: Z_STREAM_END$1, Z_STREAM_ERROR, Z_DATA_ERROR, Z_BUF_ERROR,
Z_DEFAULT_COMPRESSION: Z_DEFAULT_COMPRESSION$1,
Z_FILTERED, Z_HUFFMAN_ONLY, Z_RLE, Z_FIXED, Z_DEFAULT_STRATEGY: Z_DEFAULT_STRATEGY$1,
Z_DEFLATED: Z_DEFLATED$1
1437 /*============================================================================*/
const MAX_MEM_LEVEL = 9;
/* Maximum value for memLevel in deflateInit2 */
const MAX_WBITS = 15;
/* 32K LZ77 window */
const DEF_MEM_LEVEL = 8;
/* default memLevel */
const LENGTH_CODES = 29;
/* number of length codes, not counting the special END_BLOCK code */
const LITERALS = 256;
/* number of literal bytes 0..255 */
const L_CODES = LITERALS + 1 + LENGTH_CODES;
/* number of Literal or Length codes, including the END_BLOCK code */
/* number of distance codes */
const BL_CODES = 19;
/* number of codes used to transfer the bit lengths */
const HEAP_SIZE = 2 * L_CODES + 1;
/* maximum heap size */
const MAX_BITS = 15;
/* All codes must not exceed MAX_BITS bits */
/* The minimum and maximum match lengths */
const MIN_MATCH = 3;
const MAX_MATCH = 258;
const MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
/* preset dictionary flag bit in the zlib header */
const PRESET_DICT = 0x20;
/* Stream status (state machine for the wrapper headers): */
const INIT_STATE = 42; /* zlib header -> BUSY_STATE */
const GZIP_STATE = 57; /* gzip header -> BUSY_STATE | EXTRA_STATE */
const EXTRA_STATE = 69; /* gzip extra block -> NAME_STATE */
const NAME_STATE = 73; /* gzip file name -> COMMENT_STATE */
const COMMENT_STATE = 91; /* gzip comment -> HCRC_STATE */
const HCRC_STATE = 103; /* gzip header CRC -> BUSY_STATE */
const BUSY_STATE = 113; /* deflate -> FINISH_STATE */
const FINISH_STATE = 666; /* stream complete */
/* Block-state return values of the deflate_* strategy functions: */
const BS_NEED_MORE = 1; /* block not completed, need more input or more output */
const BS_BLOCK_DONE = 2; /* block flush performed */
const BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */
const BS_FINISH_DONE = 4; /* finish done, accept no more input or output */
const OS_CODE = 0x03; // Unix :) . Don't detect, use this default.
// Record the message for `errorCode` on the stream (the `return errorCode`
// line is outside this view).
const err = (strm, errorCode) => {
strm.msg = messages[errorCode];
// Rank a compression "function" value for comparison; values above 4 are
// shifted down by 9. NOTE(review): presumably used to order configuration
// table entries in deflateParams — confirm against the caller.
const rank = (f) => {
return ((f) * 2) - ((f) > 4 ? 9 : 0);
// Zero-fill a (typed) array in place.
const zero = (buf) => {
let len = buf.length; while (--len >= 0) { buf[len] = 0; }
/* ===========================================================================
 * Slide the hash table when sliding the window down (could be avoided with 32
 * bit values at the expense of memory usage). We slide even when level == 0 to
 * keep the hash table consistent if we switch back to level > 0 later.
 */
const slide_hash = (s) => {
let wsize = s.w_size;
/* Rebase head[] entries; positions older than one window become NIL (0). */
s.head[p] = (m >= wsize ? m - wsize : 0);
/* Rebase the prev[] chain links the same way. */
s.prev[p] = (m >= wsize ? m - wsize : 0);
/* If n is not on any hash chain, prev[n] is garbage but
 * its value will never be used.
 */
1528 /* eslint-disable new-cap */
// Classic zlib rolling hash: fold the next byte into the previous hash value
// by shifting and XOR-ing, then mask the result down to the hash-table range.
// Kept as a `let` binding so an alternative hash could be swapped in.
let HASH_ZLIB = (state, prevHash, nextByte) => {
  const folded = (prevHash << state.hash_shift) ^ nextByte;
  return folded & state.hash_mask;
};
// This hash causes less collisions, https://github.com/nodeca/pako/issues/135
// But breaks binary compatibility
//let HASH_FAST = (s, prev, data) => ((prev << 8) + (prev >> 8) + (data << 4)) & s.hash_mask;
// The hash function in use; a `let` binding so HASH_FAST could be swapped in.
let HASH = HASH_ZLIB;
/* =========================================================================
 * Flush as much pending output as possible. All deflate() output, except for
 * some deflate_stored() output, goes through this function so some
 * applications may wish to modify it to avoid allocating a large
 * strm->next_out buffer and copying into it. (See also read_buf()).
 */
const flush_pending = (strm) => {
const s = strm.state;
//_tr_flush_bits(s);
let len = s.pending;
/* Never write more than the caller's remaining output space. */
if (len > strm.avail_out) {
len = strm.avail_out;
if (len === 0) { return; }
/* Copy the flushed slice of pending_buf into the user's output buffer. */
strm.output.set(s.pending_buf.subarray(s.pending_out, s.pending_out + len), strm.next_out);
strm.next_out += len;
s.pending_out += len;
strm.total_out += len;
strm.avail_out -= len;
// NOTE(review): the `s.pending -= len` update sits outside this view;
// the reset below presumably rewinds pending_out once the buffer drains.
if (s.pending === 0) {
// Flush the current block to the pending buffer (block_start < 0 is passed
// through as -1, the "data too old" marker), then push pending output to
// the caller's buffer.
const flush_block_only = (s, last) => {
_tr_flush_block(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last);
s.block_start = s.strstart;
flush_pending(s.strm);
// Append one byte to the pending output buffer.
const put_byte = (s, b) => {
s.pending_buf[s.pending++] = b;
/* =========================================================================
 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
 * IN assertion: the stream state is correct and there is enough room in
 * pending_buf.
 */
const putShortMSB = (s, b) => {
// put_byte(s, (Byte)(b >> 8));
// put_byte(s, (Byte)(b & 0xff));
s.pending_buf[s.pending++] = (b >>> 8) & 0xff; /* high byte first */
s.pending_buf[s.pending++] = b & 0xff; /* then low byte */
/* ===========================================================================
 * Read a new buffer from the current input stream, update the adler32
 * and total number of bytes read. All deflate() input goes through
 * this function so some applications may wish to modify it to avoid
 * allocating a large strm->input buffer and copying from it.
 * (See also flush_pending()).
 */
const read_buf = (strm, buf, start, size) => {
let len = strm.avail_in;
/* Read no more than requested and no more than is available. */
if (len > size) { len = size; }
if (len === 0) { return 0; }
strm.avail_in -= len;
// zmemcpy(buf, strm->next_in, len);
buf.set(strm.input.subarray(strm.next_in, strm.next_in + len), start);
/* wrap === 1 → zlib wrapper (adler32); wrap === 2 → gzip wrapper (crc32). */
if (strm.state.wrap === 1) {
strm.adler = adler32_1(strm.adler, buf, len, start);
else if (strm.state.wrap === 2) {
strm.adler = crc32_1(strm.adler, buf, len, start);
strm.next_in += len;
strm.total_in += len;
/* ===========================================================================
 * Set match_start to the longest match starting at the given string and
 * return its length. Matches shorter or equal to prev_length are discarded,
 * in which case the result is equal to prev_length and match_start is
 * unchanged.
 * IN assertions: cur_match is the head of the hash chain for the current
 * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
 * OUT assertion: the match length is not greater than s->lookahead.
 */
const longest_match = (s, cur_match) => {
let chain_length = s.max_chain_length; /* max hash chain length */
let scan = s.strstart; /* current string */
let match; /* matched string */
let len; /* length of current match */
let best_len = s.prev_length; /* best match length so far */
let nice_match = s.nice_match; /* stop if match long enough */
const limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ?
s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/;
const _win = s.window; // shortcut
const wmask = s.w_mask;
const prev = s.prev;
/* Stop when cur_match becomes <= limit. To simplify the code,
 * we prevent matches with the string of window index 0.
 */
const strend = s.strstart + MAX_MATCH;
/* Sentinel bytes at the end of the current best match, used for the
 * cheap pre-check before a full byte-by-byte comparison. */
let scan_end1 = _win[scan + best_len - 1];
let scan_end = _win[scan + best_len];
/* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
 * It is easy to get rid of this optimization if necessary.
 */
// Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
/* Do not waste too much time if we already have a good match: */
if (s.prev_length >= s.good_match) {
/* Do not look for matches beyond the end of the input. This is necessary
 * to make deflate deterministic.
 */
if (nice_match > s.lookahead) { nice_match = s.lookahead; }
// Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
// Assert(cur_match < s->strstart, "no future");
/* Skip to next match if the match length cannot increase
 * or if the match length is less than 2. Note that the checks below
 * for insufficient lookahead only occur occasionally for performance
 * reasons. Therefore uninitialized memory will be accessed, and
 * conditional jumps will be made that depend on those values.
 * However the length of the match is limited to the lookahead, so
 * the output of deflate is not affected by the uninitialized values.
 */
if (_win[match + best_len] !== scan_end ||
_win[match + best_len - 1] !== scan_end1 ||
_win[match] !== _win[scan] ||
_win[++match] !== _win[scan + 1]) {
/* The check at best_len-1 can be removed because it will be made
 * again later. (This heuristic is not always a win.)
 * It is not necessary to compare scan[2] and match[2] since they
 * are always equal when the other bytes match, given that
 * the hash keys are equal and that HASH_BITS >= 8.
 */
// Assert(*scan == *match, "match[2]?");
/* We check for insufficient lookahead only every 8th comparison;
 * the 256th check will be made at strstart+258.
 */
/*jshint noempty:false*/
} while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
// Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
len = MAX_MATCH - (strend - scan);
scan = strend - MAX_MATCH;
/* Found a longer match: remember it and refresh the sentinel bytes. */
if (len > best_len) {
s.match_start = cur_match;
if (len >= nice_match) {
scan_end1 = _win[scan + best_len - 1];
scan_end = _win[scan + best_len];
/* Walk the hash chain until the limit or the chain-length budget runs out. */
} while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0);
if (best_len <= s.lookahead) {
/* ===========================================================================
 * Fill the window when the lookahead becomes insufficient.
 * Updates strstart and lookahead.
 *
 * IN assertion: lookahead < MIN_LOOKAHEAD
 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
 * At least one byte has been read, or avail_in == 0; reads are
 * performed for at least two bytes (required for the zip translate_eol
 * option -- not supported here).
 */
const fill_window = (s) => {
const _w_size = s.w_size;
//Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
/* Free space at the end of the window. */
more = s.window_size - s.lookahead - s.strstart;
// JS ints have 32 bit, block below not needed
/* Deal with !@#$% 64K limit: */
//if (sizeof(int) <= 2) {
// if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
// } else if (more == (unsigned)(-1)) {
// /* Very unlikely, but possible on 16 bit machine if
// * strstart == 0 && lookahead == 1 (input done a byte at time)
/* If the window is almost full and there is insufficient lookahead,
 * move the upper half to the lower one to make room in the upper half.
 */
if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) {
s.window.set(s.window.subarray(_w_size, _w_size + _w_size - more), 0);
s.match_start -= _w_size;
s.strstart -= _w_size;
/* we now have strstart >= MAX_DIST */
s.block_start -= _w_size;
if (s.insert > s.strstart) {
s.insert = s.strstart;
if (s.strm.avail_in === 0) {
/* If there was no sliding:
 * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
 * more == window_size - lookahead - strstart
 * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
 * => more >= window_size - 2*WSIZE + 2
 * In the BIG_MEM or MMAP case (not yet supported),
 * window_size == input_size + MIN_LOOKAHEAD &&
 * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
 * Otherwise, window_size == 2*WSIZE so more >= 2.
 * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
 */
//Assert(more >= 2, "more < 2");
n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more);
/* Initialize the hash value now that we have some input: */
if (s.lookahead + s.insert >= MIN_MATCH) {
str = s.strstart - s.insert;
s.ins_h = s.window[str];
/* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */
s.ins_h = HASH(s, s.ins_h, s.window[str + 1]);
//#if MIN_MATCH != 3
// Call update_hash() MIN_MATCH-3 more times
/* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */
s.ins_h = HASH(s, s.ins_h, s.window[str + MIN_MATCH - 1]);
/* Insert the string into the hash chain. */
s.prev[str & s.w_mask] = s.head[s.ins_h];
s.head[s.ins_h] = str;
if (s.lookahead + s.insert < MIN_MATCH) {
/* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
 * but this is not important since only literal bytes will be emitted.
 */
} while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0);
/* If the WIN_INIT bytes after the end of the current data have never been
 * written, then zero those bytes in order to avoid memory check reports of
 * the use of uninitialized (or uninitialised as Julian writes) bytes by
 * the longest match routines. Update the high water mark for the next
 * time through here. WIN_INIT is set to MAX_MATCH since the longest match
 * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
 */
// if (s.high_water < s.window_size) {
// const curr = s.strstart + s.lookahead;
// if (s.high_water < curr) {
// /* Previous high water mark below current data -- zero WIN_INIT
// * bytes or up to end of window, whichever is less.
// init = s.window_size - curr;
// if (init > WIN_INIT)
// zmemzero(s->window + curr, (unsigned)init);
// s->high_water = curr + init;
// else if (s->high_water < (ulg)curr + WIN_INIT) {
// /* High water mark at or above current data, but below current data
// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
// * to end of window, whichever is less.
// init = (ulg)curr + WIN_INIT - s->high_water;
// if (init > s->window_size - s->high_water)
// init = s->window_size - s->high_water;
// zmemzero(s->window + s->high_water, (unsigned)init);
// s->high_water += init;
// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
// "not enough room for search");
/* ===========================================================================
 * Copy without compression as much as possible from the input stream, return
 * the current block state.
 * In case deflateParams() is used to later switch to a non-zero compression
 * level, s->matches (otherwise unused when storing) keeps track of the number
 * of hash table slides to perform. If s->matches is 1, then one hash table
 * slide will be done when switching. If s->matches is 2, the maximum value
 * allowed here, then the hash table will be cleared, since two or more slides
 * is the same as a clear.
 * deflate_stored() is written to minimize the number of times an input byte is
 * copied. It is most efficient with large input and output buffers, which
 * maximizes the opportunites to have a single copy from next_in to next_out.
 */
const deflate_stored = (s, flush) => {
/* Smallest worthy block size when not flushing or finishing. By default
 * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
 * large input and output buffers, the stored block size will be larger.
 */
let min_block = s.pending_buf_size - 5 > s.w_size ? s.w_size : s.pending_buf_size - 5;
/* Copy as many min_block or larger stored blocks directly to next_out as
 * possible. If flushing, copy the remaining available input to next_out as
 * stored blocks, if there is enough space.
 */
let len, left, have, last = 0;
let used = s.strm.avail_in;
/* Set len to the maximum size block that we can copy directly with the
 * available input data and output space. Set left to how much of that
 * would be copied from what's left in the window.
 */
len = 65535/* MAX_STORED */; /* maximum deflate stored block length */
have = (s.bi_valid + 42) >> 3; /* number of header bytes */
if (s.strm.avail_out < have) { /* need room for header */
/* maximum stored block length that will fit in avail_out: */
have = s.strm.avail_out - have;
left = s.strstart - s.block_start; /* bytes left in window */
if (len > left + s.strm.avail_in) {
len = left + s.strm.avail_in; /* limit len to the input */
len = have; /* limit len to the output */
/* If the stored block would be less than min_block in length, or if
 * unable to copy all of the available input when flushing, then try
 * copying to the window and the pending buffer instead. Also don't
 * write an empty block when flushing -- deflate() does that.
 */
if (len < min_block && ((len === 0 && flush !== Z_FINISH$1) ||
flush === Z_NO_FLUSH$1 ||
len !== left + s.strm.avail_in)) {
/* Make a dummy stored block in pending to get the header bytes,
 * including any pending bits. This also updates the debugging counts.
 */
last = flush === Z_FINISH$1 && len === left + s.strm.avail_in ? 1 : 0;
_tr_stored_block(s, 0, 0, last);
/* Replace the lengths in the dummy stored block with len. */
s.pending_buf[s.pending - 4] = len;
s.pending_buf[s.pending - 3] = len >> 8;
s.pending_buf[s.pending - 2] = ~len;
s.pending_buf[s.pending - 1] = ~len >> 8;
/* Write the stored block header bytes. */
flush_pending(s.strm);
// /* Update debugging counts for the data about to be copied. */
// s->compressed_len += len << 3;
// s->bits_sent += len << 3;
/* Copy uncompressed bytes from the window to next_out. */
//zmemcpy(s->strm->next_out, s->window + s->block_start, left);
s.strm.output.set(s.window.subarray(s.block_start, s.block_start + left), s.strm.next_out);
s.strm.next_out += left;
s.strm.avail_out -= left;
s.strm.total_out += left;
s.block_start += left;
/* Copy uncompressed bytes directly from next_in to next_out, updating
 * the check value. */
read_buf(s.strm, s.strm.output, s.strm.next_out, len);
s.strm.next_out += len;
s.strm.avail_out -= len;
s.strm.total_out += len;
} while (last === 0);
/* Update the sliding window with the last s->w_size bytes of the copied
 * data, or append all of the copied data to the existing window if less
 * than s->w_size bytes were copied. Also update the number of bytes to
 * insert in the hash tables, in the event that deflateParams() switches to
 * a non-zero compression level.
 */
used -= s.strm.avail_in; /* number of input bytes directly copied */
/* If any input was used, then no unused input remains in the window,
 * therefore s->block_start == s->strstart.
 */
if (used >= s.w_size) { /* supplant the previous history */
s.matches = 2; /* clear hash */
//zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
s.window.set(s.strm.input.subarray(s.strm.next_in - s.w_size, s.strm.next_in), 0);
s.strstart = s.w_size;
s.insert = s.strstart;
if (s.window_size - s.strstart <= used) {
/* Slide the window down. */
s.strstart -= s.w_size;
//zmemcpy(s->window, s->window + s->w_size, s->strstart);
s.window.set(s.window.subarray(s.w_size, s.w_size + s.strstart), 0);
if (s.matches < 2) {
s.matches++; /* add a pending slide_hash() */
if (s.insert > s.strstart) {
s.insert = s.strstart;
//zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
s.window.set(s.strm.input.subarray(s.strm.next_in - used, s.strm.next_in), s.strstart);
s.insert += used > s.w_size - s.insert ? s.w_size - s.insert : used;
s.block_start = s.strstart;
if (s.high_water < s.strstart) {
s.high_water = s.strstart;
/* If the last block was written to next_out, then done. */
return BS_FINISH_DONE;
/* If flushing and all input has been consumed, then done. */
if (flush !== Z_NO_FLUSH$1 && flush !== Z_FINISH$1 &&
s.strm.avail_in === 0 && s.strstart === s.block_start) {
return BS_BLOCK_DONE;
/* Fill the window with any remaining input. */
have = s.window_size - s.strstart;
if (s.strm.avail_in > have && s.block_start >= s.w_size) {
/* Slide the window down. */
s.block_start -= s.w_size;
s.strstart -= s.w_size;
//zmemcpy(s->window, s->window + s->w_size, s->strstart);
s.window.set(s.window.subarray(s.w_size, s.w_size + s.strstart), 0);
if (s.matches < 2) {
s.matches++; /* add a pending slide_hash() */
have += s.w_size; /* more space now */
if (s.insert > s.strstart) {
s.insert = s.strstart;
if (have > s.strm.avail_in) {
have = s.strm.avail_in;
read_buf(s.strm, s.window, s.strstart, have);
s.insert += have > s.w_size - s.insert ? s.w_size - s.insert : have;
if (s.high_water < s.strstart) {
s.high_water = s.strstart;
/* There was not enough avail_out to write a complete worthy or flushed
 * stored block to next_out. Write a stored block to pending instead, if we
 * have enough input for a worthy block, or if flushing and there is enough
 * room for the remaining input as a stored block in the pending buffer.
 */
have = (s.bi_valid + 42) >> 3; /* number of header bytes */
/* maximum stored block length that will fit in pending: */
have = s.pending_buf_size - have > 65535/* MAX_STORED */ ? 65535/* MAX_STORED */ : s.pending_buf_size - have;
min_block = have > s.w_size ? s.w_size : have;
left = s.strstart - s.block_start;
if (left >= min_block ||
((left || flush === Z_FINISH$1) && flush !== Z_NO_FLUSH$1 &&
s.strm.avail_in === 0 && left <= have)) {
len = left > have ? have : left;
last = flush === Z_FINISH$1 && s.strm.avail_in === 0 &&
len === left ? 1 : 0;
_tr_stored_block(s, s.block_start, len, last);
s.block_start += len;
flush_pending(s.strm);
/* We've done all we can with the available input and output. */
return last ? BS_FINISH_STARTED : BS_NEED_MORE;
/* ===========================================================================
 * Compress as much as possible from the input stream, return the current
 * block state.
 * This function does not perform lazy evaluation of matches and inserts
 * new strings in the dictionary only for unmatched strings or for short
 * matches. It is used only for the fast compression options.
 */
const deflate_fast = (s, flush) => {

  let hash_head;        /* head of the hash chain */
  let bflush;           /* set if current block must be flushed */

  for (;;) {
    /* Make sure that we always have enough lookahead, except
     * at the end of the input file. We need MAX_MATCH bytes
     * for the next match, plus MIN_MATCH bytes to insert the
     * string following the next match.
     */
    if (s.lookahead < MIN_LOOKAHEAD) {
      fill_window(s);
      if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH$1) {
        return BS_NEED_MORE;
      }
      if (s.lookahead === 0) {
        break; /* flush the current block */
      }
    }

    /* Insert the string window[strstart .. strstart+2] in the
     * dictionary, and set hash_head to the head of the hash chain:
     */
    hash_head = 0/*NIL*/;
    if (s.lookahead >= MIN_MATCH) {
      /*** INSERT_STRING(s, s.strstart, hash_head); ***/
      s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]);
      hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
      s.head[s.ins_h] = s.strstart;
      /***/
    }

    /* Find the longest match, discarding those <= prev_length.
     * At this point we have always match_length < MIN_MATCH
     */
    if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) {
      /* To simplify the code, we prevent matches with the string
       * of window index 0 (in particular we have to avoid a match
       * of the string with itself at the start of the input file).
       */
      s.match_length = longest_match(s, hash_head);
      /* longest_match() sets match_start */
    }
    if (s.match_length >= MIN_MATCH) {
      // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only

      /*** _tr_tally_dist(s, s.strstart - s.match_start,
                     s.match_length - MIN_MATCH, bflush); ***/
      bflush = _tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH);

      s.lookahead -= s.match_length;

      /* Insert new strings in the hash table only if the match length
       * is not too large. This saves time but degrades compression.
       */
      if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) {
        s.match_length--; /* string at strstart already in table */
        do {
          s.strstart++;
          /*** INSERT_STRING(s, s.strstart, hash_head); ***/
          s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]);
          hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
          s.head[s.ins_h] = s.strstart;
          /***/
          /* strstart never exceeds WSIZE-MAX_MATCH, so there are
           * always MIN_MATCH bytes ahead.
           */
        } while (--s.match_length !== 0);
        s.strstart++;
      } else
      {
        s.strstart += s.match_length;
        s.match_length = 0;
        s.ins_h = s.window[s.strstart];
        /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */
        s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + 1]);

//#if MIN_MATCH != 3
//                Call UPDATE_HASH() MIN_MATCH-3 more times
//#endif
        /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
         * matter since it will be recomputed at next deflate call.
         */
      }
    } else {
      /* No match, output a literal byte */
      //Tracevv((stderr,"%c", s.window[s.strstart]));
      /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
      bflush = _tr_tally(s, 0, s.window[s.strstart]);

      s.lookahead--;
      s.strstart++;
    }
    if (bflush) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }
  s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1);
  if (flush === Z_FINISH$1) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  if (s.sym_next) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }
  return BS_BLOCK_DONE;
};
/* ===========================================================================
 * Same as above, but achieves better compression. We use a lazy
 * evaluation for matches: a match is finally adopted only if there is
 * no better match at the next window position.
 */
const deflate_slow = (s, flush) => {

  let hash_head;          /* head of hash chain */
  let bflush;             /* set if current block must be flushed */

  let max_insert;

  /* Process the input block. */
  for (;;) {
    /* Make sure that we always have enough lookahead, except
     * at the end of the input file. We need MAX_MATCH bytes
     * for the next match, plus MIN_MATCH bytes to insert the
     * string following the next match.
     */
    if (s.lookahead < MIN_LOOKAHEAD) {
      fill_window(s);
      if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH$1) {
        return BS_NEED_MORE;
      }
      if (s.lookahead === 0) { break; } /* flush the current block */
    }

    /* Insert the string window[strstart .. strstart+2] in the
     * dictionary, and set hash_head to the head of the hash chain:
     */
    hash_head = 0/*NIL*/;
    if (s.lookahead >= MIN_MATCH) {
      /*** INSERT_STRING(s, s.strstart, hash_head); ***/
      s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]);
      hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
      s.head[s.ins_h] = s.strstart;
      /***/
    }

    /* Find the longest match, discarding those <= prev_length.
     */
    s.prev_length = s.match_length;
    s.prev_match = s.match_start;
    s.match_length = MIN_MATCH - 1;

    if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match &&
        s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) {
      /* To simplify the code, we prevent matches with the string
       * of window index 0 (in particular we have to avoid a match
       * of the string with itself at the start of the input file).
       */
      s.match_length = longest_match(s, hash_head);
      /* longest_match() sets match_start */

      if (s.match_length <= 5 &&
         (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) {

        /* If prev_match is also MIN_MATCH, match_start is garbage
         * but we will ignore the current match anyway.
         */
        s.match_length = MIN_MATCH - 1;
      }
    }
    /* If there was a match at the previous step and the current
     * match is not better, output the previous match:
     */
    if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) {
      max_insert = s.strstart + s.lookahead - MIN_MATCH;
      /* Do not insert strings in hash table beyond this. */

      //check_match(s, s.strstart-1, s.prev_match, s.prev_length);

      /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match,
                     s.prev_length - MIN_MATCH, bflush);***/
      bflush = _tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH);
      /* Insert in hash table all strings up to the end of the match.
       * strstart-1 and strstart are already inserted. If there is not
       * enough lookahead, the last two strings are not inserted in
       * the hash table.
       */
      s.lookahead -= s.prev_length - 1;
      s.prev_length -= 2;
      do {
        if (++s.strstart <= max_insert) {
          /*** INSERT_STRING(s, s.strstart, hash_head); ***/
          s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]);
          hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
          s.head[s.ins_h] = s.strstart;
          /***/
        }
      } while (--s.prev_length !== 0);
      s.match_available = 0;
      s.match_length = MIN_MATCH - 1;
      s.strstart++;

      if (bflush) {
        /*** FLUSH_BLOCK(s, 0); ***/
        flush_block_only(s, false);
        if (s.strm.avail_out === 0) {
          return BS_NEED_MORE;
        }
        /***/
      }

    } else if (s.match_available) {
      /* If there was no match at the previous position, output a
       * single literal. If there was a match but the current match
       * is longer, truncate the previous match to a single literal.
       */
      //Tracevv((stderr,"%c", s->window[s->strstart-1]));
      /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/
      bflush = _tr_tally(s, 0, s.window[s.strstart - 1]);

      if (bflush) {
        /*** FLUSH_BLOCK_ONLY(s, 0) ***/
        flush_block_only(s, false);
        /***/
      }
      s.strstart++;
      s.lookahead--;
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
    } else {
      /* There is no previous match to compare with, wait for
       * the next step to decide.
       */
      s.match_available = 1;
      s.strstart++;
      s.lookahead--;
    }
  }
  //Assert (flush != Z_NO_FLUSH, "no flush?");
  if (s.match_available) {
    //Tracevv((stderr,"%c", s->window[s->strstart-1]));
    /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/
    bflush = _tr_tally(s, 0, s.window[s.strstart - 1]);

    s.match_available = 0;
  }
  s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1;
  if (flush === Z_FINISH$1) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  if (s.sym_next) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }

  return BS_BLOCK_DONE;
};
/* ===========================================================================
 * For Z_RLE, simply look for runs of bytes, generate matches only of distance
 * one.  Do not maintain a hash table.  (It will be regenerated if this run of
 * deflate switches away from Z_RLE.)
 */
const deflate_rle = (s, flush) => {

  let bflush;            /* set if current block must be flushed */
  let prev;              /* byte at distance one to match */
  let scan, strend;      /* scan goes up to strend for length of run */

  const _win = s.window;

  for (;;) {
    /* Make sure that we always have enough lookahead, except
     * at the end of the input file. We need MAX_MATCH bytes
     * for the longest run, plus one for the unrolled loop.
     */
    if (s.lookahead <= MAX_MATCH) {
      fill_window(s);
      if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH$1) {
        return BS_NEED_MORE;
      }
      if (s.lookahead === 0) { break; } /* flush the current block */
    }

    /* See how many times the previous byte repeats */
    s.match_length = 0;
    if (s.lookahead >= MIN_MATCH && s.strstart > 0) {
      scan = s.strstart - 1;
      prev = _win[scan];
      if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) {
        strend = s.strstart + MAX_MATCH;
        do {
          /*jshint noempty:false*/
        } while (prev === _win[++scan] && prev === _win[++scan] &&
                 prev === _win[++scan] && prev === _win[++scan] &&
                 prev === _win[++scan] && prev === _win[++scan] &&
                 prev === _win[++scan] && prev === _win[++scan] &&
                 scan < strend);
        s.match_length = MAX_MATCH - (strend - scan);
        if (s.match_length > s.lookahead) {
          s.match_length = s.lookahead;
        }
      }
      //Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
    }

    /* Emit match if have run of MIN_MATCH or longer, else emit literal */
    if (s.match_length >= MIN_MATCH) {
      //check_match(s, s.strstart, s.strstart - 1, s.match_length);

      /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/
      bflush = _tr_tally(s, 1, s.match_length - MIN_MATCH);

      s.lookahead -= s.match_length;
      s.strstart += s.match_length;
      s.match_length = 0;
    } else {
      /* No match, output a literal byte */
      //Tracevv((stderr,"%c", s->window[s->strstart]));
      /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
      bflush = _tr_tally(s, 0, s.window[s.strstart]);

      s.lookahead--;
      s.strstart++;
    }
    if (bflush) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }
  s.insert = 0;
  if (flush === Z_FINISH$1) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  if (s.sym_next) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }
  return BS_BLOCK_DONE;
};
/* ===========================================================================
 * For Z_HUFFMAN_ONLY, do not look for matches.  Do not maintain a hash table.
 * (It will be regenerated if this run of deflate switches away from Huffman.)
 */
const deflate_huff = (s, flush) => {

  let bflush;             /* set if current block must be flushed */

  for (;;) {
    /* Make sure that we have a literal to write. */
    if (s.lookahead === 0) {
      fill_window(s);
      if (s.lookahead === 0) {
        if (flush === Z_NO_FLUSH$1) {
          return BS_NEED_MORE;
        }
        break;      /* flush the current block */
      }
    }

    /* Output a literal byte */
    s.match_length = 0;
    //Tracevv((stderr,"%c", s->window[s->strstart]));
    /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
    bflush = _tr_tally(s, 0, s.window[s.strstart]);
    s.lookahead--;
    s.strstart++;
    if (bflush) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }
  s.insert = 0;
  if (flush === Z_FINISH$1) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  if (s.sym_next) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }
  return BS_BLOCK_DONE;
};
/* Values for max_lazy_match, good_match and max_chain_length, depending on
 * the desired pack level (0..9). The values given below have been tuned to
 * exclude worst case performance for pathological files. Better values may be
 * found for specific files.
 */
/* Per-level tuning record; `func` is the deflate_* routine for that level. */
function Config(good_length, max_lazy, nice_length, max_chain, func) {

  this.good_length = good_length;
  this.max_lazy = max_lazy;
  this.nice_length = nice_length;
  this.max_chain = max_chain;
  this.func = func;
}
const configuration_table = [
  /*      good lazy nice chain */
  new Config(0, 0, 0, 0, deflate_stored),          /* 0 store only */
  new Config(4, 4, 8, 4, deflate_fast),            /* 1 max speed, no lazy matches */
  new Config(4, 5, 16, 8, deflate_fast),           /* 2 */
  new Config(4, 6, 32, 32, deflate_fast),          /* 3 */

  new Config(4, 4, 16, 16, deflate_slow),          /* 4 lazy matches */
  new Config(8, 16, 32, 32, deflate_slow),         /* 5 */
  new Config(8, 16, 128, 128, deflate_slow),       /* 6 */
  new Config(8, 32, 128, 256, deflate_slow),       /* 7 */
  new Config(32, 128, 258, 1024, deflate_slow),    /* 8 */
  new Config(32, 258, 258, 4096, deflate_slow)     /* 9 max compression */
];
/* ===========================================================================
 * Initialize the "longest match" routines for a new zlib stream
 */
const lm_init = (s) => {

  s.window_size = 2 * s.w_size;

  /*** CLEAR_HASH(s); ***/
  zero(s.head); // Fill with NIL (= 0);

  /* Set the default configuration parameters:
   */
  s.max_lazy_match = configuration_table[s.level].max_lazy;
  s.good_match = configuration_table[s.level].good_length;
  s.nice_match = configuration_table[s.level].nice_length;
  s.max_chain_length = configuration_table[s.level].max_chain;

  s.strstart = 0;
  s.block_start = 0;
  s.lookahead = 0;
  s.insert = 0;
  s.match_length = s.prev_length = MIN_MATCH - 1;
  s.match_available = 0;
  s.ins_h = 0;
};
function DeflateState() {
  this.strm = null;            /* pointer back to this zlib stream */
  this.status = 0;            /* as the name implies */
  this.pending_buf = null;      /* output still pending */
  this.pending_buf_size = 0;  /* size of pending_buf */
  this.pending_out = 0;       /* next pending byte to output to the stream */
  this.pending = 0;           /* nb of bytes in the pending buffer */
  this.wrap = 0;              /* bit 0 true for zlib, bit 1 true for gzip */
  this.gzhead = null;         /* gzip header information to write */
  this.gzindex = 0;           /* where in extra, name, or comment */
  this.method = Z_DEFLATED$1; /* can only be DEFLATED */
  this.last_flush = -1;   /* value of flush param for previous deflate call */

  this.w_size = 0;  /* LZ77 window size (32K by default) */
  this.w_bits = 0;  /* log2(w_size)  (8..16) */
  this.w_mask = 0;  /* w_size - 1 */

  this.window = null;
  /* Sliding window. Input bytes are read into the second half of the window,
   * and move to the first half later to keep a dictionary of at least wSize
   * bytes. With this organization, matches are limited to a distance of
   * wSize-MAX_MATCH bytes, but this ensures that IO is always
   * performed with a length multiple of the block size.
   */

  this.window_size = 0;
  /* Actual size of window: 2*wSize, except when the user input buffer
   * is directly used as sliding window.
   */

  this.prev = null;
  /* Link to older string with same hash index. To limit the size of this
   * array to 64K, this link is maintained only for the last 32K strings.
   * An index in this array is thus a window index modulo 32K.
   */

  this.head = null;   /* Heads of the hash chains or NIL. */

  this.ins_h = 0;       /* hash index of string to be inserted */
  this.hash_size = 0;   /* number of elements in hash table */
  this.hash_bits = 0;   /* log2(hash_size) */
  this.hash_mask = 0;   /* hash_size-1 */

  this.hash_shift = 0;
  /* Number of bits by which ins_h must be shifted at each input
   * step. It must be such that after MIN_MATCH steps, the oldest
   * byte no longer takes part in the hash key, that is:
   *   hash_shift * MIN_MATCH >= hash_bits
   */

  this.block_start = 0;
  /* Window position at the beginning of the current output block. Gets
   * negative when the window is moved backwards.
   */

  this.match_length = 0;      /* length of best match */
  this.prev_match = 0;        /* previous match */
  this.match_available = 0;   /* set if previous match exists */
  this.strstart = 0;          /* start of string to insert */
  this.match_start = 0;       /* start of matching string */
  this.lookahead = 0;         /* number of valid bytes ahead in window */

  this.prev_length = 0;
  /* Length of the best match at previous step. Matches not greater than this
   * are discarded. This is used in the lazy match evaluation.
   */

  this.max_chain_length = 0;
  /* To speed up deflation, hash chains are never searched beyond this
   * length.  A higher limit improves compression ratio but degrades the
   * speed.
   */

  this.max_lazy_match = 0;
  /* Attempt to find a better match only when the current match is strictly
   * smaller than this value. This mechanism is used only for compression
   * levels >= 4.
   */
  // That's alias to max_lazy_match, don't use directly
  //this.max_insert_length = 0;
  /* Insert new strings in the hash table only if the match length is not
   * greater than this length. This saves time but degrades compression.
   * max_insert_length is used only for compression levels <= 3.
   */

  this.level = 0;     /* compression level (1..9) */
  this.strategy = 0;  /* favor or force Huffman coding*/

  this.good_match = 0;
  /* Use a faster search when the previous match is longer than this */

  this.nice_match = 0; /* Stop searching when current match exceeds this */

              /* used by trees.c: */

  /* Didn't use ct_data typedef below to suppress compiler warning */

  // struct ct_data_s dyn_ltree[HEAP_SIZE];   /* literal and length tree */
  // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
  // struct ct_data_s bl_tree[2*BL_CODES+1];  /* Huffman tree for bit lengths */

  // Use flat array of DOUBLE size, with interleaved fata,
  // because JS does not support effective
  this.dyn_ltree  = new Uint16Array(HEAP_SIZE * 2);
  this.dyn_dtree  = new Uint16Array((2 * D_CODES + 1) * 2);
  this.bl_tree    = new Uint16Array((2 * BL_CODES + 1) * 2);
  zero(this.dyn_ltree);
  zero(this.dyn_dtree);
  zero(this.bl_tree);

  this.l_desc   = null;         /* desc. for literal tree */
  this.d_desc   = null;         /* desc. for distance tree */
  this.bl_desc  = null;         /* desc. for bit length tree */

  //ush bl_count[MAX_BITS+1];
  this.bl_count = new Uint16Array(MAX_BITS + 1);
  /* number of codes at each bit length for an optimal tree */

  //int heap[2*L_CODES+1];      /* heap used to build the Huffman trees */
  this.heap = new Uint16Array(2 * L_CODES + 1);  /* heap used to build the Huffman trees */
  zero(this.heap);

  this.heap_len = 0;               /* number of elements in the heap */
  this.heap_max = 0;               /* element of largest frequency */
  /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
   * The same heap array is used to build all trees.
   */

  this.depth = new Uint16Array(2 * L_CODES + 1); //uch depth[2*L_CODES+1];
  zero(this.depth);
  /* Depth of each subtree used as tie breaker for trees of equal frequency
   */

  this.sym_buf = 0;        /* buffer for distances and literals/lengths */

  this.lit_bufsize = 0;
  /* Size of match buffer for literals/lengths.  There are 4 reasons for
   * limiting lit_bufsize to 64K:
   *   - frequencies can be kept in 16 bit counters
   *   - if compression is not successful for the first block, all input
   *     data is still in the window so we can still emit a stored block even
   *     when input comes from standard input.  (This can also be done for
   *     all blocks if lit_bufsize is not greater than 32K.)
   *   - if compression is not successful for a file smaller than 64K, we can
   *     even emit a stored file instead of a stored block (saving 5 bytes).
   *     This is applicable only for zip (not gzip or zlib).
   *   - creating new Huffman trees less frequently may not provide fast
   *     adaptation to changes in the input data statistics. (Take for
   *     example a binary file with poorly compressible code followed by
   *     a highly compressible string table.) Smaller buffer sizes give
   *     fast adaptation but have of course the overhead of transmitting
   *     trees more frequently.
   *   - I can't count above 4
   */

  this.sym_next = 0;      /* running index in sym_buf */
  this.sym_end = 0;       /* symbol table full when sym_next reaches this */

  this.opt_len = 0;       /* bit length of current block with optimal trees */
  this.static_len = 0;    /* bit length of current block with static trees */
  this.matches = 0;       /* number of string matches in current block */
  this.insert = 0;        /* bytes at end of window left to insert */

  this.bi_buf = 0;
  /* Output buffer. bits are inserted starting at the bottom (least
   * significant bits).
   */
  this.bi_valid = 0;
  /* Number of valid bits in bi_buf.  All bits above the last valid bit
   * are always zero.
   */

  // Used for window memory init. We safely ignore it for JS. That makes
  // sense only for pointers and memory check tools.
  //this.high_water = 0;
  /* High water mark offset in window for initialized bytes -- bytes above
   * this are set to zero in order to avoid memory check warnings when
   * longest match routines access bytes past the input.  This is then
   * updated to the new high water mark.
   */
}
/* =========================================================================
 * Check for a valid deflate stream state. Return 0 if ok, 1 if not.
 */
const deflateStateCheck = (strm) => {

  if (!strm) {
    return 1;
  }
  const s = strm.state;
  if (!s || s.strm !== strm || (s.status !== INIT_STATE &&
//#ifdef GZIP
                                s.status !== GZIP_STATE &&
//#endif
                                s.status !== EXTRA_STATE &&
                                s.status !== NAME_STATE &&
                                s.status !== COMMENT_STATE &&
                                s.status !== HCRC_STATE &&
                                s.status !== BUSY_STATE &&
                                s.status !== FINISH_STATE)) {
    return 1;
  }
  return 0;
};
const deflateResetKeep = (strm) => {

  if (deflateStateCheck(strm)) {
    return err(strm, Z_STREAM_ERROR);
  }

  strm.total_in = strm.total_out = 0;
  strm.data_type = Z_UNKNOWN;

  const s = strm.state;
  s.pending = 0;
  s.pending_out = 0;

  if (s.wrap < 0) {
    s.wrap = -s.wrap;
    /* was made negative by deflate(..., Z_FINISH); */
  }
  s.status =
//#ifdef GZIP
    s.wrap === 2 ? GZIP_STATE :
//#endif
    s.wrap ? INIT_STATE : BUSY_STATE;
  strm.adler = (s.wrap === 2) ?
    0  // crc32(0, Z_NULL, 0)
    :
    1; // adler32(0, Z_NULL, 0)
  s.last_flush = -2;
  _tr_init(s);
  return Z_OK$1;
};
/* Full reset: keep-reset plus re-initialization of the LZ77 match state. */
const deflateReset = (strm) => {

  const ret = deflateResetKeep(strm);
  if (ret === Z_OK$1) {
    lm_init(strm.state);
  }
  return ret;
};
/* Attach a gzip header descriptor; only valid for gzip-wrapped streams. */
const deflateSetHeader = (strm, head) => {

  if (deflateStateCheck(strm) || strm.state.wrap !== 2) {
    return Z_STREAM_ERROR;
  }
  strm.state.gzhead = head;
  return Z_OK$1;
};
const deflateInit2 = (strm, level, method, windowBits, memLevel, strategy) => {

  if (!strm) { // === Z_NULL
    return Z_STREAM_ERROR;
  }
  let wrap = 1;

  if (level === Z_DEFAULT_COMPRESSION$1) {
    level = 6;
  }

  if (windowBits < 0) { /* suppress zlib wrapper */
    wrap = 0;
    windowBits = -windowBits;
  }

  else if (windowBits > 15) {
    wrap = 2;           /* write gzip wrapper instead */
    windowBits -= 16;
  }


  if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED$1 ||
    windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
    strategy < 0 || strategy > Z_FIXED || (windowBits === 8 && wrap !== 1)) {
    return err(strm, Z_STREAM_ERROR);
  }


  if (windowBits === 8) {
    windowBits = 9;
  }
  /* until 256-byte window bug fixed */

  const s = new DeflateState();

  strm.state = s;
  s.strm = strm;
  s.status = INIT_STATE;     /* to pass state test in deflateReset() */

  s.wrap = wrap;
  s.gzhead = null;
  s.w_bits = windowBits;
  s.w_size = 1 << s.w_bits;
  s.w_mask = s.w_size - 1;

  s.hash_bits = memLevel + 7;
  s.hash_size = 1 << s.hash_bits;
  s.hash_mask = s.hash_size - 1;
  s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH);

  s.window = new Uint8Array(s.w_size * 2);
  s.head = new Uint16Array(s.hash_size);
  s.prev = new Uint16Array(s.w_size);

  // Don't need mem init magic for JS.
  //s.high_water = 0;  /* nothing written to s->window yet */

  s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */

  /* We overlay pending_buf and sym_buf. This works since the average size
   * for length/distance pairs over any compressed block is assured to be 31
   * bits or less.
   *
   * Analysis: The longest fixed codes are a length code of 8 bits plus 5
   * extra bits, for lengths 131 to 257. The longest fixed distance codes are
   * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest
   * possible fixed-codes length/distance pair is then 31 bits total.
   *
   * sym_buf starts one-fourth of the way into pending_buf. So there are
   * three bytes in sym_buf for every four bytes in pending_buf. Each symbol
   * in sym_buf is three bytes -- two for the distance and one for the
   * literal/length. As each symbol is consumed, the pointer to the next
   * sym_buf value to read moves forward three bytes. From that symbol, up to
   * 31 bits are written to pending_buf. The closest the written pending_buf
   * bits gets to the next sym_buf symbol to read is just before the last
   * code is written. At that time, 31*(n-2) bits have been written, just
   * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at
   * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1
   * symbols are written.) The closest the writing gets to what is unread is
   * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and
   * can range from 128 to 32768.
   *
   * Therefore, at a minimum, there are 142 bits of space between what is
   * written and what is read in the overlain buffers, so the symbols cannot
   * be overwritten by the compressed data. That space is actually 139 bits,
   * due to the three-bit fixed-code block header.
   *
   * That covers the case where either Z_FIXED is specified, forcing fixed
   * codes, or when the use of fixed codes is chosen, because that choice
   * results in a smaller compressed block than dynamic codes. That latter
   * condition then assures that the above analysis also covers all dynamic
   * blocks. A dynamic-code block will only be chosen to be emitted if it has
   * fewer bits than a fixed-code block would for the same set of symbols.
   * Therefore its average symbol length is assured to be less than 31. So
   * the compressed data for a dynamic block also cannot overwrite the
   * symbols from which it is being constructed.
   */

  s.pending_buf_size = s.lit_bufsize * 4;
  s.pending_buf = new Uint8Array(s.pending_buf_size);

  // It is offset from `s.pending_buf` (size is `s.lit_bufsize * 2`)
  //s->sym_buf = s->pending_buf + s->lit_bufsize;
  s.sym_buf = s.lit_bufsize;

  //s->sym_end = (s->lit_bufsize - 1) * 3;
  s.sym_end = (s.lit_bufsize - 1) * 3;
  /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
   * on 16 bit machines and because stored blocks are restricted to
   * 64K-1 bytes.
   */

  s.level = level;
  s.strategy = strategy;
  s.method = method;

  return deflateReset(strm);
};
/* Convenience wrapper: deflateInit2 with the zlib defaults. */
const deflateInit = (strm, level) => {

  return deflateInit2(strm, level, Z_DEFLATED$1, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY$1);
};
2978 /* ========================================================================= */
2979 const deflate$1 = (strm, flush) => {
2981 if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) {
2982 return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR;
2985 const s = strm.state;
2988 (strm.avail_in !== 0 && !strm.input) ||
2989 (s.status === FINISH_STATE && flush !== Z_FINISH$1)) {
2990 return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR);
2993 const old_flush = s.last_flush;
2994 s.last_flush = flush;
2996 /* Flush as much pending output as possible */
2997 if (s.pending !== 0) {
2998 flush_pending(strm);
2999 if (strm.avail_out === 0) {
3000 /* Since avail_out is 0, deflate will be called again with
3001 * more output space, but possibly with both pending and
3002 * avail_in equal to zero. There won't be anything to do,
3003 * but this is not an error situation so make sure we
3004 * return OK instead of BUF_ERROR at next call of deflate:
3010 /* Make sure there is something to do and avoid duplicate consecutive
3011 * flushes. For repeated and useless calls with Z_FINISH, we keep
3012 * returning Z_STREAM_END instead of Z_BUF_ERROR.
3014 } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) &&
3015 flush !== Z_FINISH$1) {
3016 return err(strm, Z_BUF_ERROR);
3019 /* User must not provide more input after the first FINISH: */
3020 if (s.status === FINISH_STATE && strm.avail_in !== 0) {
3021 return err(strm, Z_BUF_ERROR);
3024 /* Write the header */
3025 if (s.status === INIT_STATE && s.wrap === 0) {
3026 s.status = BUSY_STATE;
3028 if (s.status === INIT_STATE) {
3030 let header = (Z_DEFLATED$1 + ((s.w_bits - 8) << 4)) << 8;
3031 let level_flags = -1;
3033 if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) {
3035 } else if (s.level < 6) {
3037 } else if (s.level === 6) {
3042 header |= (level_flags << 6);
3043 if (s.strstart !== 0) { header |= PRESET_DICT; }
3044 header += 31 - (header % 31);
3046 putShortMSB(s, header);
3048 /* Save the adler32 of the preset dictionary: */
3049 if (s.strstart !== 0) {
3050 putShortMSB(s, strm.adler >>> 16);
3051 putShortMSB(s, strm.adler & 0xffff);
3053 strm.adler = 1; // adler32(0L, Z_NULL, 0);
3054 s.status = BUSY_STATE;
3056 /* Compression must start with an empty pending buffer */
3057 flush_pending(strm);
3058 if (s.pending !== 0) {
3064 if (s.status === GZIP_STATE) {
3066 strm.adler = 0; //crc32(0L, Z_NULL, 0);
3070 if (!s.gzhead) { // s->gzhead == Z_NULL
3076 put_byte(s, s.level === 9 ? 2 :
3077 (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?
3079 put_byte(s, OS_CODE);
3080 s.status = BUSY_STATE;
3082 /* Compression must start with an empty pending buffer */
3083 flush_pending(strm);
3084 if (s.pending !== 0) {
3090 put_byte(s, (s.gzhead.text ? 1 : 0) +
3091 (s.gzhead.hcrc ? 2 : 0) +
3092 (!s.gzhead.extra ? 0 : 4) +
3093 (!s.gzhead.name ? 0 : 8) +
3094 (!s.gzhead.comment ? 0 : 16)
3096 put_byte(s, s.gzhead.time & 0xff);
3097 put_byte(s, (s.gzhead.time >> 8) & 0xff);
3098 put_byte(s, (s.gzhead.time >> 16) & 0xff);
3099 put_byte(s, (s.gzhead.time >> 24) & 0xff);
3100 put_byte(s, s.level === 9 ? 2 :
3101 (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?
3103 put_byte(s, s.gzhead.os & 0xff);
3104 if (s.gzhead.extra && s.gzhead.extra.length) {
3105 put_byte(s, s.gzhead.extra.length & 0xff);
3106 put_byte(s, (s.gzhead.extra.length >> 8) & 0xff);
3108 if (s.gzhead.hcrc) {
3109 strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending, 0);
3112 s.status = EXTRA_STATE;
3115 if (s.status === EXTRA_STATE) {
3116 if (s.gzhead.extra/* != Z_NULL*/) {
3117 let beg = s.pending; /* start of bytes to update crc */
3118 let left = (s.gzhead.extra.length & 0xffff) - s.gzindex;
3119 while (s.pending + left > s.pending_buf_size) {
3120 let copy = s.pending_buf_size - s.pending;
3121 // zmemcpy(s.pending_buf + s.pending,
3122 // s.gzhead.extra + s.gzindex, copy);
3123 s.pending_buf.set(s.gzhead.extra.subarray(s.gzindex, s.gzindex + copy), s.pending);
3124 s.pending = s.pending_buf_size;
3125 //--- HCRC_UPDATE(beg) ---//
3126 if (s.gzhead.hcrc && s.pending > beg) {
3127 strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);
3131 flush_pending(strm);
3132 if (s.pending !== 0) {
3139 // JS specific: s.gzhead.extra may be TypedArray or Array for backward compatibility
3140 // TypedArray.slice and TypedArray.from don't exist in IE10-IE11
3141 let gzhead_extra = new Uint8Array(s.gzhead.extra);
3142 // zmemcpy(s->pending_buf + s->pending,
3143 // s->gzhead->extra + s->gzindex, left);
3144 s.pending_buf.set(gzhead_extra.subarray(s.gzindex, s.gzindex + left), s.pending);
3146 //--- HCRC_UPDATE(beg) ---//
3147 if (s.gzhead.hcrc && s.pending > beg) {
3148 strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);
3153 s.status = NAME_STATE;
3155 if (s.status === NAME_STATE) {
3156 if (s.gzhead.name/* != Z_NULL*/) {
3157 let beg = s.pending; /* start of bytes to update crc */
3160 if (s.pending === s.pending_buf_size) {
3161 //--- HCRC_UPDATE(beg) ---//
3162 if (s.gzhead.hcrc && s.pending > beg) {
3163 strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);
3166 flush_pending(strm);
3167 if (s.pending !== 0) {
3173 // JS specific: little magic to add zero terminator to end of string
3174 if (s.gzindex < s.gzhead.name.length) {
3175 val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff;
3180 } while (val !== 0);
3181 //--- HCRC_UPDATE(beg) ---//
3182 if (s.gzhead.hcrc && s.pending > beg) {
3183 strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);
3188 s.status = COMMENT_STATE;
3190 if (s.status === COMMENT_STATE) {
3191 if (s.gzhead.comment/* != Z_NULL*/) {
3192 let beg = s.pending; /* start of bytes to update crc */
3195 if (s.pending === s.pending_buf_size) {
3196 //--- HCRC_UPDATE(beg) ---//
3197 if (s.gzhead.hcrc && s.pending > beg) {
3198 strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);
3201 flush_pending(strm);
3202 if (s.pending !== 0) {
3208 // JS specific: little magic to add zero terminator to end of string
3209 if (s.gzindex < s.gzhead.comment.length) {
3210 val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff;
3215 } while (val !== 0);
3216 //--- HCRC_UPDATE(beg) ---//
3217 if (s.gzhead.hcrc && s.pending > beg) {
3218 strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);
3222 s.status = HCRC_STATE;
3224 if (s.status === HCRC_STATE) {
3225 if (s.gzhead.hcrc) {
3226 if (s.pending + 2 > s.pending_buf_size) {
3227 flush_pending(strm);
3228 if (s.pending !== 0) {
3233 put_byte(s, strm.adler & 0xff);
3234 put_byte(s, (strm.adler >> 8) & 0xff);
3235 strm.adler = 0; //crc32(0L, Z_NULL, 0);
3237 s.status = BUSY_STATE;
3239 /* Compression must start with an empty pending buffer */
3240 flush_pending(strm);
3241 if (s.pending !== 0) {
3248 /* Start a new block or continue the current one.
3250 if (strm.avail_in !== 0 || s.lookahead !== 0 ||
3251 (flush !== Z_NO_FLUSH$1 && s.status !== FINISH_STATE)) {
3252 let bstate = s.level === 0 ? deflate_stored(s, flush) :
3253 s.strategy === Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
3254 s.strategy === Z_RLE ? deflate_rle(s, flush) :
3255 configuration_table[s.level].func(s, flush);
3257 if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) {
3258 s.status = FINISH_STATE;
3260 if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) {
3261 if (strm.avail_out === 0) {
3263 /* avoid BUF_ERROR next call, see above */
3266 /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
3267 * of deflate should use the same flush parameter to make sure
3268 * that the flush is complete. So we don't have to output an
3269 * empty block here, this will be done at next call. This also
3270 * ensures that for a very small output buffer, we emit at most
3274 if (bstate === BS_BLOCK_DONE) {
3275 if (flush === Z_PARTIAL_FLUSH) {
3278 else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
3280 _tr_stored_block(s, 0, 0, false);
3281 /* For a full flush, this empty block will be recognized
3282 * as a special marker by inflate_sync().
3284 if (flush === Z_FULL_FLUSH$1) {
3285 /*** CLEAR_HASH(s); ***/ /* forget history */
3286 zero(s.head); // Fill with NIL (= 0);
3288 if (s.lookahead === 0) {
3295 flush_pending(strm);
3296 if (strm.avail_out === 0) {
3297 s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */
3303 if (flush !== Z_FINISH$1) { return Z_OK$1; }
3304 if (s.wrap <= 0) { return Z_STREAM_END$1; }
3306 /* Write the trailer */
3308 put_byte(s, strm.adler & 0xff);
3309 put_byte(s, (strm.adler >> 8) & 0xff);
3310 put_byte(s, (strm.adler >> 16) & 0xff);
3311 put_byte(s, (strm.adler >> 24) & 0xff);
3312 put_byte(s, strm.total_in & 0xff);
3313 put_byte(s, (strm.total_in >> 8) & 0xff);
3314 put_byte(s, (strm.total_in >> 16) & 0xff);
3315 put_byte(s, (strm.total_in >> 24) & 0xff);
3319 putShortMSB(s, strm.adler >>> 16);
3320 putShortMSB(s, strm.adler & 0xffff);
3323 flush_pending(strm);
3324 /* If avail_out is zero, the application will call deflate again
3325 * to flush the rest.
3327 if (s.wrap > 0) { s.wrap = -s.wrap; }
3328 /* write the trailer only once! */
3329 return s.pending !== 0 ? Z_OK$1 : Z_STREAM_END$1;
// deflateEnd(strm) -> error code. Tears down a deflate stream.
// NOTE(review): this listing elides lines (content numbering jumps 3333->3335->3339->3343);
// statements between the visible ones — including the actual state teardown — are not shown.
3333 const deflateEnd = (strm) => {
3335 if (deflateStateCheck(strm)) {
3336 return Z_STREAM_ERROR;
// Capture status before teardown; ending while still BUSY_STATE is a data error.
3339 const status = strm.state.status;
3343 return status === BUSY_STATE ? err(strm, Z_DATA_ERROR) : Z_OK$1;
3347 /* =========================================================================
3348 * Initializes the compression dictionary from the given byte
3349 * sequence without producing any compressed output.
// deflateSetDictionary(strm, dictionary) -> error code.
// Preloads the sliding window and hash chains with `dictionary` so that
// subsequent compression can reference it as history.
3351 const deflateSetDictionary = (strm, dictionary) => {
3353 let dictLength = dictionary.length;
3355 if (deflateStateCheck(strm)) {
3356 return Z_STREAM_ERROR;
3359 const s = strm.state;
3360 const wrap = s.wrap;
// Rejected for gzip streams (wrap === 2), after compression started for zlib
// streams (wrap === 1 past INIT_STATE), or once lookahead data exists.
3362 if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) {
3363 return Z_STREAM_ERROR;
3366 /* when using zlib wrappers, compute Adler-32 for provided dictionary */
3368 /* adler32(strm->adler, dictionary, dictLength); */
3369 strm.adler = adler32_1(strm.adler, dictionary, dictLength, 0);
// wrap is temporarily cleared and restored from the saved `wrap` later
// (restore point is in lines elided from this listing).
3372 s.wrap = 0; /* avoid computing Adler-32 in read_buf */
3374 /* if dictionary would fill window, just replace the history */
3375 if (dictLength >= s.w_size) {
3376 if (wrap === 0) { /* already empty otherwise */
3377 /*** CLEAR_HASH(s); ***/
3378 zero(s.head); // Fill with NIL (= 0);
// Only the trailing w_size bytes of an oversized dictionary are useful history.
3384 // dictionary = dictionary.slice(dictLength - s.w_size);
3385 let tmpDict = new Uint8Array(s.w_size);
3386 tmpDict.set(dictionary.subarray(dictLength - s.w_size, dictLength), 0);
3387 dictionary = tmpDict;
3388 dictLength = s.w_size;
3390 /* insert dictionary into window and hash */
// Save the caller's input bookkeeping; the dictionary is fed through the
// normal input path and the originals are restored below.
3391 const avail = strm.avail_in;
3392 const next = strm.next_in;
3393 const input = strm.input;
3394 strm.avail_in = dictLength;
3396 strm.input = dictionary;
// Hash every position that has MIN_MATCH bytes of lookahead available.
3398 while (s.lookahead >= MIN_MATCH) {
3399 let str = s.strstart;
3400 let n = s.lookahead - (MIN_MATCH - 1);
3402 /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */
3403 s.ins_h = HASH(s, s.ins_h, s.window[str + MIN_MATCH - 1]);
// Standard zlib hash-chain insert: prev links positions sharing a hash bucket.
3405 s.prev[str & s.w_mask] = s.head[s.ins_h];
3407 s.head[s.ins_h] = str;
3411 s.lookahead = MIN_MATCH - 1;
3414 s.strstart += s.lookahead;
3415 s.block_start = s.strstart;
3416 s.insert = s.lookahead;
3418 s.match_length = s.prev_length = MIN_MATCH - 1;
3419 s.match_available = 0;
// Restore caller's input pointers saved above.
3420 strm.next_in = next;
3422 strm.avail_in = avail;
// Aliases for the deflate module's public API, later collected into the
// module export object (object-literal lines 3449+ below; the wrapper that
// opens that literal is elided from this listing).
3428 var deflateInit_1 = deflateInit;
3429 var deflateInit2_1 = deflateInit2;
3430 var deflateReset_1 = deflateReset;
3431 var deflateResetKeep_1 = deflateResetKeep;
3432 var deflateSetHeader_1 = deflateSetHeader;
3433 var deflate_2$1 = deflate$1;
3434 var deflateEnd_1 = deflateEnd;
3435 var deflateSetDictionary_1 = deflateSetDictionary;
3436 var deflateInfo = 'pako deflate (from Nodeca project)';
// Stubs for zlib entry points this build does not implement (the right-hand
// names are not defined in the visible source — presumably assigned elsewhere
// or intentionally undefined placeholders; verify against the full file).
3439 module.exports.deflateBound = deflateBound;
3440 module.exports.deflateCopy = deflateCopy;
3441 module.exports.deflateGetDictionary = deflateGetDictionary;
3442 module.exports.deflateParams = deflateParams;
3443 module.exports.deflatePending = deflatePending;
3444 module.exports.deflatePrime = deflatePrime;
3445 module.exports.deflateTune = deflateTune;
// Fields of the deflate module export object (opening line elided).
3449 deflateInit: deflateInit_1,
3450 deflateInit2: deflateInit2_1,
3451 deflateReset: deflateReset_1,
3452 deflateResetKeep: deflateResetKeep_1,
3453 deflateSetHeader: deflateSetHeader_1,
3454 deflate: deflate_2$1,
3455 deflateEnd: deflateEnd_1,
3456 deflateSetDictionary: deflateSetDictionary_1,
3457 deflateInfo: deflateInfo
// _has(obj, key) -> Boolean: own-property check that is safe even when
// `obj` shadows or lacks hasOwnProperty.
3460 const _has = (obj, key) => {
3461 return Object.prototype.hasOwnProperty.call(obj, key);
// assign(obj, from1, from2, ...) — Object.assign-style shallow merge kept for
// legacy environments. Mutates and (per upstream) returns `obj`; falsy sources
// are skipped, non-object sources throw. The copy loop body is elided here.
3464 var assign = function (obj /*from1, from2, from3, ...*/) {
3465 const sources = Array.prototype.slice.call(arguments, 1);
3466 while (sources.length) {
3467 const source = sources.shift();
3468 if (!source) { continue; }
3470 if (typeof source !== 'object') {
3471 throw new TypeError(source + 'must be non-object');
// Copy own enumerable properties only.
3474 for (const p in source) {
3475 if (_has(source, p)) {
3485 // Join array of chunks to single array.
// flattenChunks(chunks: Uint8Array[]) -> Uint8Array: two passes — first sum
// lengths, then copy each chunk into one pre-allocated buffer.
3486 var flattenChunks = (chunks) => {
3487 // calculate data length
3490 for (let i = 0, l = chunks.length; i < l; i++) {
3491 len += chunks[i].length;
3495 const result = new Uint8Array(len);
3497 for (let i = 0, pos = 0, l = chunks.length; i < l; i++) {
3498 let chunk = chunks[i];
3499 result.set(chunk, pos);
3500 pos += chunk.length;
// Field of the `common` export object (surrounding literal lines elided).
3508 flattenChunks: flattenChunks
3511 // String encode/decode helpers
3514 // Quick check if we can use fast array to bin string conversion
3516 // - apply(Array) can fail on Android 2.2
3517 // - apply(Uint8Array) can fail on iOS 5.1 Safari
3519 let STR_APPLY_UIA_OK = true;
// Feature-probe once at module load; flag consulted by buf2binstring below.
3521 try { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; }
3524 // Table with utf8 lengths (calculated by first byte of sequence)
3525 // Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS,
3526 // because max possible codepoint is 0x10ffff
3527 const _utf8len = new Uint8Array(256);
3528 for (let q = 0; q < 256; q++) {
3529 _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1);
// NOTE(review): the duplicated assignment below (`[254] = [254]`) matches
// upstream pako; the comment suggests [253] may have been intended — verify
// against upstream before changing.
3531 _utf8len[254] = _utf8len[254] = 1; // Invalid sequence start
3534 // convert string to array (typed, when possible)
// string2buf(str) -> Uint8Array of UTF-8 bytes. Uses native TextEncoder when
// available; otherwise a manual two-pass encode (size count, then fill) that
// combines surrogate pairs into single code points.
3535 var string2buf = (str) => {
3536 if (typeof TextEncoder === 'function' && TextEncoder.prototype.encode) {
3537 return new TextEncoder().encode(str);
3540 let buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;
3542 // count binary size
3543 for (m_pos = 0; m_pos < str_len; m_pos++) {
3544 c = str.charCodeAt(m_pos);
// High surrogate followed by low surrogate -> one astral code point.
3545 if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
3546 c2 = str.charCodeAt(m_pos + 1);
3547 if ((c2 & 0xfc00) === 0xdc00) {
3548 c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
// UTF-8 length by code point range: 1/2/3/4 bytes.
3552 buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;
3556 buf = new Uint8Array(buf_len);
// Second pass: emit bytes (the 1-byte branch is in lines elided here).
3559 for (i = 0, m_pos = 0; i < buf_len; m_pos++) {
3560 c = str.charCodeAt(m_pos);
3561 if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
3562 c2 = str.charCodeAt(m_pos + 1);
3563 if ((c2 & 0xfc00) === 0xdc00) {
3564 c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
3571 } else if (c < 0x800) {
3573 buf[i++] = 0xC0 | (c >>> 6);
3574 buf[i++] = 0x80 | (c & 0x3f);
3575 } else if (c < 0x10000) {
3577 buf[i++] = 0xE0 | (c >>> 12);
3578 buf[i++] = 0x80 | (c >>> 6 & 0x3f);
3579 buf[i++] = 0x80 | (c & 0x3f);
// 4-byte sequence for astral code points (leading `} else {` elided).
3582 buf[i++] = 0xf0 | (c >>> 18);
3583 buf[i++] = 0x80 | (c >>> 12 & 0x3f);
3584 buf[i++] = 0x80 | (c >>> 6 & 0x3f);
3585 buf[i++] = 0x80 | (c & 0x3f);
// buf2binstring(buf, len) -> String: one char per byte value. Fast path uses
// fromCharCode.apply when the feature probe (STR_APPLY_UIA_OK) passed;
// otherwise a per-char loop (its accumulator declaration is elided here).
3593 const buf2binstring = (buf, len) => {
3594 // On Chrome, the arguments in a function call that are allowed is `65534`.
3595 // If the length of the buffer is smaller than that, we can use this optimization,
3596 // otherwise we will take a slower path.
3598 if (buf.subarray && STR_APPLY_UIA_OK) {
3599 return String.fromCharCode.apply(null, buf.length === len ? buf : buf.subarray(0, len));
3604 for (let i = 0; i < len; i++) {
3605 result += String.fromCharCode(buf[i]);
3611 // convert array to string
// buf2string(buf[, max]) -> String: decode UTF-8 bytes. Native TextDecoder
// when available; otherwise manual decode into a UTF-16 code-unit array,
// emitting U+FFFD for unrepresentable/truncated sequences.
3612 var buf2string = (buf, max) => {
3613 const len = max || buf.length;
3615 if (typeof TextDecoder === 'function' && TextDecoder.prototype.decode) {
3616 return new TextDecoder().decode(buf.subarray(0, max));
3621 // Reserve max possible length (2 words per char)
3622 // NB: by unknown reasons, Array is significantly faster for
3623 // String.fromCharCode.apply than Uint16Array.
3624 const utf16buf = new Array(len * 2);
// Main decode loop (the read of `c = buf[i++]` is in a line elided here).
3626 for (out = 0, i = 0; i < len;) {
3628 // quick process ascii
3629 if (c < 0x80) { utf16buf[out++] = c; continue; }
3631 let c_len = _utf8len[c];
3632 // skip 5 & 6 byte codes
3633 if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }
3635 // apply mask on first byte
3636 c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07;
// Fold continuation bytes (c_len decrement is in an elided line).
3638 while (c_len > 1 && i < len) {
3639 c = (c << 6) | (buf[i++] & 0x3f);
3643 // terminated by end of string?
3644 if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }
// BMP code points stored directly; astral split into a surrogate pair.
3647 utf16buf[out++] = c;
3650 utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);
3651 utf16buf[out++] = 0xdc00 | (c & 0x3ff);
3655 return buf2binstring(utf16buf, out);
3659 // Calculate max possible position in utf8 buffer,
3660 // that will not break sequence. If that's not possible
3661 // - (very small limits) return max size as is.
3663 // buf[] - utf8 bytes array
3664 // max - length limit (mandatory);
// utf8border(buf, max) -> Number: largest cut point <= max that does not split
// a multi-byte sequence. (The `pos` initialization line is elided here.)
3665 var utf8border = (buf, max) => {
3667 max = max || buf.length;
3668 if (max > buf.length) { max = buf.length; }
3670 // go back from last position, until start of sequence found
// Continuation bytes all match 10xxxxxx.
3672 while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }
3674 // Very small and broken sequence,
3675 // return max, because we should return something anyway.
3676 if (pos < 0) { return max; }
3678 // If we came to start of buffer - that means buffer is too small,
3680 if (pos === 0) { return max; }
// Keep the whole sequence only if it fits within max.
3682 return (pos + _utf8len[buf[pos]] > max) ? pos : max;
// Fields of the strings export object (opening line elided).
3686 string2buf: string2buf,
3687 buf2string: buf2string,
3688 utf8border: utf8border
3691 // (C) 1995-2013 Jean-loup Gailly and Mark Adler
3692 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
3694 // This software is provided 'as-is', without any express or implied
3695 // warranty. In no event will the authors be held liable for any damages
3696 // arising from the use of this software.
3698 // Permission is granted to anyone to use this software for any purpose,
3699 // including commercial applications, and to alter it and redistribute it
3700 // freely, subject to the following restrictions:
3702 // 1. The origin of this software must not be misrepresented; you must not
3703 // claim that you wrote the original software. If you use this software
3704 // in a product, an acknowledgment in the product documentation would be
3705 // appreciated but is not required.
3706 // 2. Altered source versions must be plainly marked as such, and must not be
3707 // misrepresented as being the original software.
3708 // 3. This notice may not be removed or altered from any source distribution.
// ZStream: JS analogue of zlib's z_stream struct — shared bookkeeping object
// for deflate/inflate. Several numeric field initializers (avail_in/out,
// total_in/out, adler, state) are in lines elided from this listing.
3710 function ZStream() {
3711 /* next input byte */
3712 this.input = null; // JS specific, because we have no pointers
3714 /* number of bytes available at input */
3716 /* total number of input bytes read so far */
3718 /* next output byte should be put there */
3719 this.output = null; // JS specific, because we have no pointers
3721 /* remaining free space at output */
3723 /* total number of bytes output so far */
3725 /* last error message, NULL if no error */
3726 this.msg = ''/*Z_NULL*/;
3727 /* not visible by applications */
3729 /* best guess about the data type: binary or text */
3730 this.data_type = 2/*Z_UNKNOWN*/;
3731 /* adler32 value of the uncompressed data */
3735 var zstream = ZStream;
// Cached for reliable [object ArrayBuffer] type tests in the wrappers below.
3737 const toString = Object.prototype.toString;
3739 /* Public constants ==========================================================*/
3740 /* ===========================================================================*/
// Names pulled from the constants module (the destructuring wrapper lines
// are elided from this listing).
3743 Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FULL_FLUSH, Z_FINISH,
3745 Z_DEFAULT_COMPRESSION,
3750 /* ===========================================================================*/
3756 * Generic JS-style wrapper for zlib calls. If you don't need
3757 * streaming behaviour - use more simple functions: [[deflate]],
3758 * [[deflateRaw]] and [[gzip]].
3762 * Deflate.chunks -> Array
3764 * Chunks of output data, if [[Deflate#onData]] not overridden.
3768 * Deflate.result -> Uint8Array
3770 * Compressed result, generated by default [[Deflate#onData]]
3771 * and [[Deflate#onEnd]] handlers. Filled after you push last chunk
3772 * (call [[Deflate#push]] with `Z_FINISH` / `true` param).
3776 * Deflate.err -> Number
3778 * Error code after deflate finished. 0 (Z_OK) on success.
3779 * You will not need it in real life, because deflate errors
3780 * are possible only on wrong options or bad `onData` / `onEnd`
3785 * Deflate.msg -> String
3787 * Error message, if [[Deflate.err]] != 0
3792 * new Deflate(options)
3793 * - options (Object): zlib deflate options.
3795 * Creates new deflator instance with specified params. Throws exception
3796 * on bad params. Supported options:
3804 * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
3805 * for more information on these.
3807 * Additional options, for internal needs:
3809 * - `chunkSize` - size of generated data chunks (16K by default)
3810 * - `raw` (Boolean) - do raw deflate
3811 * - `gzip` (Boolean) - create gzip wrapper
3812 * - `header` (Object) - custom header for gzip
3813 * - `text` (Boolean) - true if compressed data believed to be text
3814 * - `time` (Number) - modification time, unix timestamp
3815 * - `os` (Number) - operation system code
3816 * - `extra` (Array) - array of bytes with extra data (max 65536)
3817 * - `name` (String) - file name (binary string)
3818 * - `comment` (String) - comment (binary string)
3819 * - `hcrc` (Boolean) - true if header crc should be added
3824 * const pako = require('pako')
3825 * , chunk1 = new Uint8Array([1,2,3,4,5,6,7,8,9])
3826 * , chunk2 = new Uint8Array([10,11,12,13,14,15,16,17,18,19]);
3828 * const deflate = new pako.Deflate({ level: 3});
3830 * deflate.push(chunk1, false);
3831 * deflate.push(chunk2, true); // true -> last chunk
3833 * if (deflate.err) { throw new Error(deflate.err); }
3835 * console.log(deflate.result);
// Deflate(options): streaming deflator wrapper. Merges user options over
// defaults, normalizes windowBits for raw/gzip modes, initializes the zlib
// state, and optionally applies header and dictionary. Throws on bad params.
3838 function Deflate(options) {
3839 this.options = common.assign({
3840 level: Z_DEFAULT_COMPRESSION,
// Other defaults (method/chunkSize/windowBits/memLevel) are in elided lines.
3845 strategy: Z_DEFAULT_STRATEGY
3848 let opt = this.options;
// raw mode: negative windowBits tells zlib to skip the wrapper.
3850 if (opt.raw && (opt.windowBits > 0)) {
3851 opt.windowBits = -opt.windowBits;
// gzip mode: windowBits + 16 selects the gzip wrapper.
3854 else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) {
3855 opt.windowBits += 16;
3858 this.err = 0; // error code, if happens (0 = Z_OK)
3859 this.msg = ''; // error message
3860 this.ended = false; // used to avoid multiple onEnd() calls
3861 this.chunks = []; // chunks of compressed data
3863 this.strm = new zstream();
3864 this.strm.avail_out = 0;
// deflateInit2 argument list spans elided lines (strm, level, method, ...).
3866 let status = deflate_1$1.deflateInit2(
3875 if (status !== Z_OK) {
3876 throw new Error(messages[status]);
// Applied when opt.header is set (guard line elided).
3880 deflate_1$1.deflateSetHeader(this.strm, opt.header);
3883 if (opt.dictionary) {
3885 // Convert data if needed
3886 if (typeof opt.dictionary === 'string') {
3887 // If we need to compress text, change encoding to utf8.
3888 dict = strings.string2buf(opt.dictionary);
3889 } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {
3890 dict = new Uint8Array(opt.dictionary);
3892 dict = opt.dictionary;
3895 status = deflate_1$1.deflateSetDictionary(this.strm, dict);
3897 if (status !== Z_OK) {
3898 throw new Error(messages[status]);
3901 this._dict_set = true;
3906 * Deflate#push(data[, flush_mode]) -> Boolean
3907 * - data (Uint8Array|ArrayBuffer|String): input data. Strings will be
3908 * converted to utf8 byte sequence.
3909 * - flush_mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.
3910 * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.
3912 * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with
3913 * new compressed chunks. Returns `true` on success. The last data block must
3914 * have `flush_mode` Z_FINISH (or `true`). That will flush internal pending
3915 * buffers and call [[Deflate#onEnd]].
3917 * On fail call [[Deflate#onEnd]] with error code and return false.
3922 * push(chunk, false); // push one of data chunks
3924 * push(chunk, true); // push last chunk
// push(data[, flush_mode]) -> Boolean. Feeds one chunk through deflate,
// emitting onData for each filled output buffer; Z_FINISH (or `true`)
// finalizes the stream and triggers onEnd.
3927 Deflate.prototype.push = function (data, flush_mode) {
3928 const strm = this.strm;
3929 const chunkSize = this.options.chunkSize;
3930 let status, _flush_mode;
3932 if (this.ended) { return false; }
// Integer flush modes pass through; booleans map to Z_FINISH / Z_NO_FLUSH.
3934 if (flush_mode === ~~flush_mode) _flush_mode = flush_mode;
3935 else _flush_mode = flush_mode === true ? Z_FINISH : Z_NO_FLUSH;
3937 // Convert data if needed
3938 if (typeof data === 'string') {
3939 // If we need to compress text, change encoding to utf8.
3940 strm.input = strings.string2buf(data);
3941 } else if (toString.call(data) === '[object ArrayBuffer]') {
3942 strm.input = new Uint8Array(data);
// (else branch and next_in reset are in elided lines)
3948 strm.avail_in = strm.input.length;
// Pump loop: allocate a fresh output chunk whenever the previous one filled.
3951 if (strm.avail_out === 0) {
3952 strm.output = new Uint8Array(chunkSize);
3954 strm.avail_out = chunkSize;
3957 // Make sure avail_out > 6 to avoid repeating markers
3958 if ((_flush_mode === Z_SYNC_FLUSH || _flush_mode === Z_FULL_FLUSH) && strm.avail_out <= 6) {
3959 this.onData(strm.output.subarray(0, strm.next_out));
3964 status = deflate_1$1.deflate(strm, _flush_mode);
3966 // Ended => flush and finish
3967 if (status === Z_STREAM_END) {
3968 if (strm.next_out > 0) {
3969 this.onData(strm.output.subarray(0, strm.next_out));
3971 status = deflate_1$1.deflateEnd(this.strm);
// (onEnd call and `this.ended = true` are in elided lines)
3974 return status === Z_OK;
3977 // Flush if out buffer full
3978 if (strm.avail_out === 0) {
3979 this.onData(strm.output);
3983 // Flush if requested and has data
3984 if (_flush_mode > 0 && strm.next_out > 0) {
3985 this.onData(strm.output.subarray(0, strm.next_out));
3990 if (strm.avail_in === 0) break;
3998 * Deflate#onData(chunk) -> Void
3999 * - chunk (Uint8Array): output data.
4001 * By default, stores data blocks in `chunks[]` property and glue
4002 * those in `onEnd`. Override this handler, if you need another behaviour.
// Default sink: accumulate chunks; flattened into `result` by onEnd.
4004 Deflate.prototype.onData = function (chunk) {
4005 this.chunks.push(chunk);
4010 * Deflate#onEnd(status) -> Void
4011 * - status (Number): deflate status. 0 (Z_OK) on success,
4014 * Called once after you tell deflate that the input stream is
4015 * complete (Z_FINISH). By default - join collected chunks,
4016 * free memory and fill `results` / `err` properties.
4018 Deflate.prototype.onEnd = function (status) {
4019 // On success - join
4020 if (status === Z_OK) {
4021 this.result = common.flattenChunks(this.chunks);
// (chunks reset and `this.err = status` are in elided lines)
4025 this.msg = this.strm.msg;
4030 * deflate(data[, options]) -> Uint8Array
4031 * - data (Uint8Array|ArrayBuffer|String): input data to compress.
4032 * - options (Object): zlib deflate options.
4034 * Compress `data` with deflate algorithm and `options`.
4036 * Supported options are:
4044 * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
4045 * for more information on these.
4049 * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify
4050 * negative windowBits implicitly.
4055 * const pako = require('pako')
4056 * const data = new Uint8Array([1,2,3,4,5,6,7,8,9]);
4058 * console.log(pako.deflate(data));
// One-shot convenience wrapper around the streaming Deflate class.
4061 function deflate(input, options) {
4062 const deflator = new Deflate(options);
// `true` = Z_FINISH: single push compresses and finalizes in one call.
4064 deflator.push(input, true);
4066 // That will never happens, if you don't cheat with options :)
4067 if (deflator.err) { throw deflator.msg || messages[deflator.err]; }
4069 return deflator.result;
4074 * deflateRaw(data[, options]) -> Uint8Array
4075 * - data (Uint8Array|ArrayBuffer|String): input data to compress.
4076 * - options (Object): zlib deflate options.
4078 * The same as [[deflate]], but creates raw data, without wrapper
4079 * (header and adler32 crc).
4081 function deflateRaw(input, options) {
4082 options = options || {};
// (the `options.raw = true;` line is elided from this listing)
4084 return deflate(input, options);
4089 * gzip(data[, options]) -> Uint8Array
4090 * - data (Uint8Array|ArrayBuffer|String): input data to compress.
4091 * - options (Object): zlib deflate options.
4093 * The same as [[deflate]], but create gzip wrapper instead of
// Forces the gzip flag, then delegates to the one-shot deflate wrapper.
4096 function gzip(input, options) {
4097 options = options || {};
4098 options.gzip = true;
4099 return deflate(input, options);
// Final aliases and UMD export wiring (the object literal holding
// deflateRaw/constants, and the lines defining deflate_1/gzip_1, are
// partially elided from this listing).
4103 var Deflate_1 = Deflate;
4104 var deflate_2 = deflate;
4105 var deflateRaw_1 = deflateRaw;
4107 var constants = constants$1;
4112 deflateRaw: deflateRaw_1,
4114 constants: constants
// Attach the public API to the UMD `exports` object (global.pako in browsers).
4117 exports.Deflate = Deflate_1;
4118 exports.constants = constants;
4119 exports["default"] = deflate_1;
4120 exports.deflate = deflate_2;
4121 exports.deflateRaw = deflateRaw_1;
4122 exports.gzip = gzip_1;
4124 Object.defineProperty(exports, '__esModule', { value: true });