/*
 * bzip2 is written by Julian Seward <jseward@bzip.org>.
 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
 * See README and LICENSE files in this directory for more information.
 */

/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
This file is part of bzip2/libbzip2, a program and library for
lossless, block-sorting data compression.

bzip2/libbzip2 version 1.0.4 of 20 December 2006
Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>

Please read the WARNING, DISCLAIMER and PATENTS sections in the
README file.

This program is released under the terms of the license contained
in the file LICENSE.
------------------------------------------------------------------ */
/* CHANGES
 * 0.9.0    -- original version.
 * 0.9.0a/b -- no changes in this file.
 * 0.9.0c   -- changed setting of nGroups in sendMTFValues()
 *             so as to do a bit better on small files
 */

/* #include "bzlib_private.h" */
/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void BZ2_bsInitWrite(EState* s)
{
	s->bsLive = 0;
	s->bsBuff = 0;
}
/*---------------------------------------------------*/
static
void bsFinishWrite(EState* s)
{
	/* Flush any remaining live bits out of the 32-bit bit buffer */
	while (s->bsLive > 0) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
}
/*---------------------------------------------------*/
static
/* Helps only on level 5, on other levels hurts. ? */
#if CONFIG_BZIP2_FEATURE_SPEED >= 5
ALWAYS_INLINE
#endif
void bsW(EState* s, int32_t n, uint32_t v)
{
	/* Flush whole bytes so that n more bits are guaranteed to fit */
	while (s->bsLive >= 8) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	s->bsBuff |= (v << (32 - s->bsLive - n));
	s->bsLive += n;
}
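/*
 * Illustrative sketch, not part of bzip2: how the bsBuff/bsLive pair
 * behaves. bsBuff is a 32-bit buffer whose top bsLive bits are valid;
 * a writer ORs new bits in just below the live ones and flushes whole
 * bytes from the top. The stand-alone harness below (hypothetical
 * demo_* names) packs a byte and two nibbles and prints the emitted
 * bytes. Guarded by #if 0 so it does not affect the build.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t demo_buff;  /* top demo_live bits are valid */
static int      demo_live;

static void demo_putbits(int n, uint32_t v)
{
	while (demo_live >= 8) {  /* flush full bytes from the top */
		printf("%02x ", (uint8_t)(demo_buff >> 24));
		demo_buff <<= 8;
		demo_live -= 8;
	}
	demo_buff |= (v << (32 - demo_live - n));
	demo_live += n;
}

int main(void)
{
	demo_putbits(8, 0x42);  /* one whole byte */
	demo_putbits(4, 0x5);   /* two nibbles packed into one byte */
	demo_putbits(4, 0x9);
	while (demo_live > 0) { /* final flush, as bsFinishWrite() does */
		printf("%02x ", (uint8_t)(demo_buff >> 24));
		demo_buff <<= 8;
		demo_live -= 8;
	}
	printf("\n");           /* prints: 42 59 */
	return 0;
}
#endif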
/*---------------------------------------------------*/
static
void bsPutU32(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 24) & 0xff);
	bsW(s, 8, (u >> 16) & 0xff);
	bsW(s, 8, (u >>  8) & 0xff);
	bsW(s, 8,  u        & 0xff);
}
/*---------------------------------------------------*/
static
void bsPutU16(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 8) & 0xff);
	bsW(s, 8,  u       & 0xff);
}
/*---------------------------------------------------*/
/*--- The back end proper                         ---*/
/*---------------------------------------------------*/
/*---------------------------------------------------*/
static
void makeMaps_e(EState* s)
{
	int i;

	s->nInUse = 0;
	for (i = 0; i < 256; i++) {
		if (s->inUse[i]) {
			/* map each byte value that occurs to the next dense code */
			s->unseqToSeq[i] = s->nInUse;
			s->nInUse++;
		}
	}
}
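/*
 * Illustrative sketch, not part of bzip2: what makeMaps_e() computes.
 * If only bytes 'a', 'b' and 'z' occur in the block, unseqToSeq maps
 * them to the dense codes 0, 1, 2 and nInUse becomes 3. Hypothetical
 * stand-alone version, guarded by #if 0.
 */
#if 0
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint8_t inUse[256] = { 0 };
	uint8_t unseqToSeq[256];
	int nInUse = 0, i;

	inUse['a'] = inUse['b'] = inUse['z'] = 1;
	for (i = 0; i < 256; i++) {
		if (inUse[i]) {
			unseqToSeq[i] = nInUse; /* next free dense code */
			nInUse++;
		}
	}
	assert(nInUse == 3);
	assert(unseqToSeq['a'] == 0 && unseqToSeq['b'] == 1 && unseqToSeq['z'] == 2);
	return 0;
}
#endif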
/*---------------------------------------------------*/
static
void generateMTFValues(EState* s)
{
	/*
	 * After sorting (eg, here),
	 * s->arr1[0 .. s->nblock-1] holds the sorted order,
	 * and
	 * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
	 * holds the original block data.
	 *
	 * The first thing to do is generate the MTF values,
	 * and put them in
	 * ((uint16_t*)s->arr1)[0 .. s->nblock-1].
	 * Because there are strictly fewer or equal MTF values
	 * than block values, ptr values in this area are overwritten
	 * with MTF values only when they are no longer needed.
	 *
	 * The final compressed bitstream is generated into the
	 * area starting at
	 * &((uint8_t*)s->arr2)[s->nblock]
	 *
	 * These storage aliases are set up in bzCompressInit(),
	 * except for the last one, which is arranged in
	 * compressBlock().
	 */
	uint32_t* ptr   = s->ptr;
	uint8_t*  block = s->block;
	uint16_t* mtfv  = s->mtfv;
	uint8_t   yy[256];
	int32_t   i, j;
	int32_t   zPend;
	int32_t   wr;
	int32_t   EOB;

	EOB = s->nInUse + 1;
	for (i = 0; i <= EOB; i++)
		s->mtfFreq[i] = 0;
	wr = 0;
	zPend = 0;
	for (i = 0; i < s->nInUse; i++)
		yy[i] = (uint8_t)i;
	for (i = 0; i < s->nblock; i++) {
		uint8_t ll_i;
		AssertD(wr <= i, "generateMTFValues(1)");
		j = ptr[i] - 1;
		if (j < 0)
			j += s->nblock;
		ll_i = s->unseqToSeq[block[j]];
		AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");
		if (yy[0] == ll_i) {
			zPend++;
		} else {
			if (zPend > 0) {
				zPend--;
				while (1) {
					if (zPend & 1) {
						mtfv[wr] = BZ_RUNB; wr++;
						s->mtfFreq[BZ_RUNB]++;
					} else {
						mtfv[wr] = BZ_RUNA; wr++;
						s->mtfFreq[BZ_RUNA]++;
					}
					if (zPend < 2) break;
					zPend = (uint32_t)(zPend - 2) / 2;
					/* bbox: unsigned div is easier */
				}
				zPend = 0;
			}
			{
				/* move ll_i to the front of yy[], remembering where it was */
				register uint8_t  rtmp;
				register uint8_t* ryy_j;
				register uint8_t  rll_i;
				rtmp  = yy[1];
				yy[1] = yy[0];
				ryy_j = &(yy[1]);
				rll_i = ll_i;
				while (rll_i != rtmp) {
					register uint8_t rtmp2;
					rtmp2 = rtmp;
					rtmp  = *(++ryy_j);
					*ryy_j = rtmp2;
				}
				yy[0] = rtmp;
				j = ryy_j - &(yy[0]);
				mtfv[wr] = j + 1;
				wr++;
				s->mtfFreq[j + 1]++;
			}
		}
	}
	/* flush any trailing run of zeroes */
	if (zPend > 0) {
		zPend--;
		while (1) {
			if (zPend & 1) {
				mtfv[wr] = BZ_RUNB; wr++;
				s->mtfFreq[BZ_RUNB]++;
			} else {
				mtfv[wr] = BZ_RUNA; wr++;
				s->mtfFreq[BZ_RUNA]++;
			}
			if (zPend < 2) break;
			zPend = (uint32_t)(zPend - 2) / 2;
			/* bbox: unsigned div is easier */
		}
		zPend = 0;
	}

	mtfv[wr] = EOB; wr++;
	s->mtfFreq[EOB]++;

	s->nMTF = wr;
}
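/*
 * Illustrative sketch, not part of bzip2: plain move-to-front followed
 * by the RUNA/RUNB zero-run code used above. A run of n zeroes is
 * written in bijective base 2 (digits RUNA=1, RUNB=2, least significant
 * first), which is what the zPend-- / (zPend-2)/2 dance computes.
 * Hypothetical stand-alone version, guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void emit_zero_run(int32_t n)
{
	/* write run length n in bijective base 2, LSB first */
	n--;
	while (1) {
		if (n & 1) printf("RUNB ");
		else       printf("RUNA ");
		if (n < 2) break;
		n = (uint32_t)(n - 2) / 2; /* same unsigned div trick as above */
	}
}

int main(void)
{
	/* MTF over a 3-symbol alphabet; 0 runs are deferred into zPend */
	uint8_t input[] = { 2, 2, 0, 0, 0 };
	uint8_t yy[3] = { 0, 1, 2 };
	int32_t zPend = 0;
	unsigned i;

	for (i = 0; i < sizeof(input); i++) {
		int j = 0, pos;
		while (yy[j] != input[i]) j++;  /* find symbol's position */
		if (j == 0) {
			zPend++;                /* symbol already at front */
			continue;
		}
		if (zPend > 0) {
			emit_zero_run(zPend);
			zPend = 0;
		}
		pos = j;
		while (j > 0) { yy[j] = yy[j-1]; j--; } /* move to front */
		yy[0] = input[i];
		printf("MTF%d ", pos + 1);  /* bzip2 stores j+1: 0/1 are RUNA/RUNB */
	}
	if (zPend > 0) emit_zero_run(zPend);
	printf("\n");                       /* prints: MTF3 RUNA MTF2 RUNB */
	return 0;
}
#endif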
/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15
static
void sendMTFValues(EState* s)
{
	int32_t v, t, i, j, gs, ge, totc, bt, bc, iter;
	int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
	int32_t nGroups, nBytes;
	/*
	 * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * is a global since the decoder also needs it.
	 *
	 * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * are also globals, used only in this proc.
	 * Made global to keep stack frame size small.
	 */
#define code     sendMTFValues__code
#define rfreq    sendMTFValues__rfreq
#define len_pack sendMTFValues__len_pack
	uint16_t cost[BZ_N_GROUPS];
	int32_t  fave[BZ_N_GROUPS];
	uint16_t* mtfv = s->mtfv;
	alphaSize = s->nInUse + 2;
	for (t = 0; t < BZ_N_GROUPS; t++)
		for (v = 0; v < alphaSize; v++)
			s->len[t][v] = BZ_GREATER_ICOST;
	/*--- Decide how many coding tables to use ---*/
	AssertH(s->nMTF > 0, 3001);
	if (s->nMTF < 200)  nGroups = 2; else
	if (s->nMTF < 600)  nGroups = 3; else
	if (s->nMTF < 1200) nGroups = 4; else
	if (s->nMTF < 2400) nGroups = 5; else
	nGroups = 6;
	/*--- Generate an initial set of coding tables ---*/
	{
		int32_t nPart, remF, tFreq, aFreq;

		nPart = nGroups;
		remF = s->nMTF;
		gs = 0;
		while (nPart > 0) {
			tFreq = remF / nPart;
			ge = gs - 1;
			aFreq = 0;
			while (aFreq < tFreq && ge < alphaSize-1) {
				ge++;
				aFreq += s->mtfFreq[ge];
			}
			if (ge > gs
			 && nPart != nGroups && nPart != 1
			 && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
			) {
				aFreq -= s->mtfFreq[ge];
				ge--;
			}
			for (v = 0; v < alphaSize; v++)
				if (v >= gs && v <= ge)
					s->len[nPart-1][v] = BZ_LESSER_ICOST;
				else
					s->len[nPart-1][v] = BZ_GREATER_ICOST;

			nPart--;
			gs = ge + 1;
			remF -= aFreq;
		}
	}
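/*
 * Illustrative sketch, not part of bzip2: the initial tables simply
 * split the alphabet into nGroups contiguous ranges of roughly equal
 * total frequency; each table then prices its own range cheaply
 * (BZ_LESSER_ICOST) and everything else expensively. Hypothetical
 * stand-alone version with made-up frequencies, guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t freq[6] = { 50, 30, 10, 5, 3, 2 }; /* made-up mtfFreq */
	int32_t alphaSize = 6, nGroups = 2;
	int32_t nPart = nGroups, remF = 100, gs = 0;

	while (nPart > 0) {
		int32_t tFreq = remF / nPart; /* frequency this group should absorb */
		int32_t aFreq = 0, ge = gs - 1;
		while (aFreq < tFreq && ge < alphaSize - 1) {
			ge++;
			aFreq += freq[ge];
		}
		printf("group %d: symbols [%d..%d], freq %d\n",
			(int)(nGroups - nPart), (int)gs, (int)ge, (int)aFreq);
		nPart--;
		remF -= aFreq;
		gs = ge + 1;
	}
	return 0; /* prints [0..0] freq 50, then [1..5] freq 50 */
}
#endif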
	/*
	 * Iterate up to BZ_N_ITERS times to improve the tables.
	 */
	for (iter = 0; iter < BZ_N_ITERS; iter++) {
		for (t = 0; t < nGroups; t++)
			fave[t] = 0;
		for (t = 0; t < nGroups; t++)
			for (v = 0; v < alphaSize; v++)
				s->rfreq[t][v] = 0;
#if CONFIG_BZIP2_FEATURE_SPEED >= 5
		/*
		 * Set up an auxiliary length table which is used to fast-track
		 * the common case (nGroups == 6).
		 */
		if (nGroups == 6) {
			for (v = 0; v < alphaSize; v++) {
				s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
				s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
				s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
			}
		}
#endif
		nSelectors = 0;
		totc = 0;
		gs = 0;
		while (1) {
			/*--- Set group start & end marks. --*/
			if (gs >= s->nMTF)
				break;
			ge = gs + BZ_G_SIZE - 1;
			if (ge >= s->nMTF)
				ge = s->nMTF-1;
			/*
			 * Calculate the cost of this group as coded
			 * by each of the coding tables.
			 */
			for (t = 0; t < nGroups; t++)
				cost[t] = 0;
#if CONFIG_BZIP2_FEATURE_SPEED >= 5
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
				register uint32_t cost01, cost23, cost45;
				register uint16_t icv;
				cost01 = cost23 = cost45 = 0;
#define BZ_ITER(nn) \
				icv = mtfv[gs+(nn)]; \
				cost01 += s->len_pack[icv][0]; \
				cost23 += s->len_pack[icv][1]; \
				cost45 += s->len_pack[icv][2];
				BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
				BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
				BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
				BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
				BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
				BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
				BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
				BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
				BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
				BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
#undef BZ_ITER
				cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
				cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
				cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				for (i = gs; i <= ge; i++) {
					uint16_t icv = mtfv[i];
					for (t = 0; t < nGroups; t++)
						cost[t] += s->len[t][icv];
				}
			}
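/*
 * Illustrative sketch, not part of bzip2: why len_pack works. Two
 * 16-bit cost accumulators share one 32-bit word, so adding a packed
 * pair of code lengths advances both at once; the halves cannot bleed
 * into each other because 50 symbols x 17 bits max < 65536.
 * Hypothetical stand-alone version, guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t len0[3] = { 2, 3, 17 }; /* made-up lengths, table 0 */
	uint16_t len1[3] = { 5, 1, 4 };  /* made-up lengths, table 1 */
	uint32_t len_pack[3];
	uint32_t cost01 = 0;
	int v;

	for (v = 0; v < 3; v++)
		len_pack[v] = ((uint32_t)len1[v] << 16) | len0[v];

	/* cost a "group" consisting of symbols 0, 1, 2 */
	for (v = 0; v < 3; v++)
		cost01 += len_pack[v];

	printf("table0 cost = %u\n", cost01 & 0xffff); /* 22 */
	printf("table1 cost = %u\n", cost01 >> 16);    /* 10 */
	return 0;
}
#endif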
			/*
			 * Find the coding table which is best for this group,
			 * and record its identity in the selector table.
			 */
			bc = cost[0];
			bt = 0;
			for (t = 1 /*0*/; t < nGroups; t++) {
				if (cost[t] < bc) {
					bc = cost[t];
					bt = t;
				}
			}
			totc += bc;
			fave[bt]++;
			s->selector[nSelectors] = bt;
			nSelectors++;
			/*
			 * Increment the symbol frequencies for the selected table.
			 */
			/* 1% faster compress. +800 bytes */
#if CONFIG_BZIP2_FEATURE_SPEED >= 4
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
				BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
				BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
				BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
				BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
				BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
				BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
				BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
				BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
				BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
				BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
#undef BZ_ITUR
				gs = ge + 1;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				while (gs <= ge) {
					s->rfreq[bt][mtfv[gs]]++;
					gs++;
				}
				/* already is: gs = ge + 1; */
			}
		}
		/*
		 * Recompute the tables based on the accumulated frequencies.
		 */
		/* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
		 * comment in huffman.c for details. */
		for (t = 0; t < nGroups; t++)
			BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
	}
	AssertH(nGroups < 8, 3002);
	AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);
	/*--- Compute MTF values for the selectors. ---*/
	{
		uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;

		for (i = 0; i < nGroups; i++)
			pos[i] = i;
		for (i = 0; i < nSelectors; i++) {
			ll_i = s->selector[i];
			j = 0;
			tmp = pos[j];
			while (ll_i != tmp) {
				j++;
				tmp2 = tmp;
				tmp = pos[j];
				pos[j] = tmp2;
			}
			pos[0] = tmp;
			s->selectorMtf[i] = j;
		}
	}
	/*--- Assign actual codes for the tables. --*/
	for (t = 0; t < nGroups; t++) {
		minLen = 32;
		maxLen = 0;
		for (i = 0; i < alphaSize; i++) {
			if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
			if (s->len[t][i] < minLen) minLen = s->len[t][i];
		}
		AssertH(!(maxLen > 17 /*20*/), 3004);
		AssertH(!(minLen < 1), 3005);
		BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
	}
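/*
 * Illustrative sketch, not part of this file: roughly what
 * BZ2_hbAssignCodes() (in huffman.c) does with the (minLen, maxLen,
 * len[]) triple computed above. Codes are canonical: symbols of equal
 * length get consecutive values, and the running value doubles when
 * moving to the next length. Hedged reconstruction, guarded by #if 0.
 */
#if 0
#include <stdint.h>

static void assign_codes(int32_t* code, const uint8_t* length,
		int32_t minLen, int32_t maxLen, int32_t alphaSize)
{
	int32_t n, i, vec = 0;
	for (n = minLen; n <= maxLen; n++) {
		for (i = 0; i < alphaSize; i++)
			if (length[i] == n) { code[i] = vec; vec++; }
		vec <<= 1; /* make room for the longer codes */
	}
}
#endif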
	/*--- Transmit the mapping table. ---*/
	{
		/* bbox: optimized a bit more than in bzip2 */
		int inUse16 = 0;

		for (i = 0; i < 16; i++) {
			if (sizeof(long) <= 4) {
				inUse16 = inUse16*2 +
					((*(uint32_t*)&(s->inUse[i * 16 + 0])
					| *(uint32_t*)&(s->inUse[i * 16 + 4])
					| *(uint32_t*)&(s->inUse[i * 16 + 8])
					| *(uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
			} else { /* Our CPU can do better */
				inUse16 = inUse16*2 +
					((*(uint64_t*)&(s->inUse[i * 16 + 0])
					| *(uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
			}
		}
		bsW(s, 16, inUse16);

		inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
		for (i = 0; i < 16; i++) {
			if (inUse16 < 0) { /* this 16-symbol chunk has in-use symbols */
				unsigned v16 = 0;
				for (j = 0; j < 16; j++)
					v16 = v16*2 + s->inUse[i * 16 + j];
				bsW(s, 16, v16);
			}
			inUse16 <<= 1;
		}
	}
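/*
 * Illustrative sketch, not part of bzip2: the mapping table is a
 * two-level bitmap. One 16-bit word marks which 16-symbol chunks of
 * the 256-symbol alphabet are non-empty; one further 16-bit word per
 * non-empty chunk marks the individual symbols. Hypothetical
 * stand-alone version, guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t inUse[256] = { 0 };
	unsigned inUse16 = 0, i, j;

	inUse['A'] = inUse['B'] = 1; /* 0x41, 0x42: both in chunk 4 */

	for (i = 0; i < 16; i++) {
		unsigned any = 0;
		for (j = 0; j < 16; j++)
			any |= inUse[i * 16 + j];
		inUse16 = inUse16 * 2 + (any != 0);
	}
	printf("summary: %04x\n", inUse16); /* 0800: only chunk 4 set */

	for (i = 0; i < 16; i++) {
		if (inUse16 & (0x8000u >> i)) {
			unsigned v16 = 0;
			for (j = 0; j < 16; j++)
				v16 = v16 * 2 + inUse[i * 16 + j];
			printf("chunk %u: %04x\n", i, v16); /* chunk 4: 6000 */
		}
	}
	return 0;
}
#endif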
	/*--- Now the selectors. ---*/
	bsW(s, 3, nGroups);
	bsW(s, 15, nSelectors);
	for (i = 0; i < nSelectors; i++) {
		/* each selector MTF value is sent in unary: j 1-bits, then a 0 */
		for (j = 0; j < s->selectorMtf[i]; j++)
			bsW(s, 1, 1);
		bsW(s, 1, 0);
	}
	/*--- Now the coding tables. ---*/
	for (t = 0; t < nGroups; t++) {
		int32_t curr = s->len[t][0];
		bsW(s, 5, curr);
		for (i = 0; i < alphaSize; i++) {
			while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ };
			while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ };
			bsW(s, 1, 0);
		}
	}
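/*
 * Illustrative sketch, not part of bzip2: the code-length serializer
 * above is a delta code. The first length is sent in 5 bits; then for
 * each symbol, '10' means "current length +1", '11' means "-1", and a
 * single '0' bit settles that symbol. Hypothetical stand-alone version
 * that prints the emitted bit groups, guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t len[4] = { 3, 5, 4, 4 }; /* made-up code lengths */
	int32_t curr = len[0], i;

	printf("5 bits: %d\n", (int)curr);
	for (i = 0; i < 4; i++) {
		while (curr < len[i]) { printf("10 "); curr++; }
		while (curr > len[i]) { printf("11 "); curr--; }
		printf("0\n"); /* this symbol's length is settled */
	}
	return 0; /* prints: 0 / 10 10 0 / 11 0 / 0 */
}
#endif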
	/*--- And finally, the block data proper ---*/
	selCtr = 0;
	gs = 0;
	while (1) {
		if (gs >= s->nMTF)
			break;
		ge = gs + BZ_G_SIZE - 1;
		if (ge >= s->nMTF)
			ge = s->nMTF-1;
		AssertH(s->selector[selCtr] < nGroups, 3006);
		/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
#if 0
		if (nGroups == 6 && 50 == ge-gs+1) {
			/*--- fast track the common case ---*/
			uint16_t mtfv_i;
			uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
#define BZ_ITAH(nn) \
			mtfv_i = mtfv[gs+(nn)]; \
			bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
			BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
			BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
			BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
			BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
			BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
			BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
			BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
			BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
			BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
			BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
#undef BZ_ITAH
			gs = ge + 1;
		} else
#endif
		{
			/*--- slow version which correctly handles all situations ---*/
			/* code is a bit bigger, but moves the multiply out of the loop */
			uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
			while (gs <= ge) {
				bsW(s,
					s_len_sel_selCtr[mtfv[gs]],
					s_code_sel_selCtr[mtfv[gs]]
				);
				gs++;
			}
			/* already is: gs = ge+1; */
		}
		selCtr++;
	}
	AssertH(selCtr == nSelectors, 3007);
}
/*---------------------------------------------------*/
static
void BZ2_compressBlock(EState* s, int is_last_block)
{
	if (s->nblock > 0) {
		/* fold this block's CRC into the running stream CRC:
		 * combined = rotl32(combined, 1) ^ blockCRC */
		BZ_FINALISE_CRC(s->blockCRC);
		s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
		s->combinedCRC ^= s->blockCRC;
		if (s->blockNo > 1)
			s->numZ = 0;
		BZ2_blockSort(s);
	}

	s->zbits = &((uint8_t*)s->arr2)[s->nblock];
	/*-- If this is the first block, create the stream header. --*/
	if (s->blockNo == 1) {
		BZ2_bsInitWrite(s);
		/*bsPutU8(s, BZ_HDR_B);*/
		/*bsPutU8(s, BZ_HDR_Z);*/
		/*bsPutU8(s, BZ_HDR_h);*/
		/*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
		bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
	}

	if (s->nblock > 0) {
		/*bsPutU8(s, 0x31);*/
		/*bsPutU8(s, 0x41);*/
		/*bsPutU8(s, 0x59);*/
		/*bsPutU8(s, 0x26);*/
		bsPutU32(s, 0x31415926);
		/*bsPutU8(s, 0x53);*/
		/*bsPutU8(s, 0x59);*/
		bsPutU16(s, 0x5359);
		/*-- Now the block's CRC, so it is in a known place. --*/
		bsPutU32(s, s->blockCRC);

		/*
		 * Now a single bit indicating (non-)randomisation.
		 * As of version 0.9.5, we use a better sorting algorithm
		 * which makes randomisation unnecessary. So always set
		 * the randomised bit to 'no'. Of course, the decoder
		 * still needs to be able to handle randomised blocks
		 * so as to maintain backwards compatibility with
		 * older versions of bzip2.
		 */
		bsW(s, 1, 0);

		bsW(s, 24, s->origPtr);
		generateMTFValues(s);
		sendMTFValues(s);
	}
	/*-- If this is the last block, add the stream trailer. --*/
	if (is_last_block) {
		/*bsPutU8(s, 0x17);*/
		/*bsPutU8(s, 0x72);*/
		/*bsPutU8(s, 0x45);*/
		/*bsPutU8(s, 0x38);*/
		bsPutU32(s, 0x17724538);
		/*bsPutU8(s, 0x50);*/
		/*bsPutU8(s, 0x90);*/
		bsPutU16(s, 0x5090);
		bsPutU32(s, s->combinedCRC);
		bsFinishWrite(s);
	}
}
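/*
 * Stream framing recap (all values visible above): the stream opens
 * with "BZh" plus the level digit (BZ_HDR_BZh0 + blockSize100k); each
 * block opens with the 48-bit magic 0x314159265359 (digits of pi)
 * followed by its CRC; the stream closes with the 48-bit magic
 * 0x177245385090 (digits of sqrt(pi)) followed by the combined CRC.
 */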
/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/