/*
 * bzip2 is written by Julian Seward <jseward@bzip.org>.
 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
 * See README and LICENSE files in this directory for more information.
 */

/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*--- compress.c                                            ---*/
/*-------------------------------------------------------------*/
/* ------------------------------------------------------------------
   This file is part of bzip2/libbzip2, a program and library for
   lossless, block-sorting data compression.

   bzip2/libbzip2 version 1.0.4 of 20 December 2006
   Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>

   Please read the WARNING, DISCLAIMER and PATENTS sections in the
   README file.

   This program is released under the terms of the license contained
   in the file LICENSE.
------------------------------------------------------------------ */

/* CHANGES
 * 0.9.0    -- original version.
 * 0.9.0a/b -- no changes in this file.
 * 0.9.0c   -- changed setting of nGroups in sendMTFValues()
 *             so as to do a bit better on small files
 */

/* #include "bzlib_private.h" */
/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void BZ2_bsInitWrite(EState* s)
{
	s->bsLive = 0;
	s->bsBuff = 0;
}
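
/*
 * Note on the bit accumulator (as the code below implies):
 * s->bsBuff holds s->bsLive valid bits, left-justified at bit 31.
 * Writers shift new bits in below the live ones; whenever 8 or more
 * bits are live, the top byte (bsBuff >> 24) is flushed to s->zbits[].
 */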
/*---------------------------------------------------*/
static NOINLINE
void bsFinishWrite(EState* s)
{
	while (s->bsLive > 0) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
}
/*---------------------------------------------------*/
static
/* Helps only on level 5, on other levels hurts. ? */
#if CONFIG_BZIP2_FEATURE_SPEED >= 5
ALWAYS_INLINE
#endif
void bsW(EState* s, int32_t n, uint32_t v)
{
	while (s->bsLive >= 8) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	s->bsBuff |= (v << (32 - s->bsLive - n));
	s->bsLive += n;
}
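
/*
 * Worked example: with an empty accumulator, bsW(s, 3, 5) places the
 * bits 101 at positions 31..29 (5 << (32 - 0 - 3)), so bsBuff becomes
 * 0xA0000000 and bsLive becomes 3. After the flush loop bsLive <= 7,
 * so a single write must not exceed 25 bits; the largest write in
 * this file appears to be 24 bits (origPtr).
 */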
/*---------------------------------------------------*/
static
void bsPutU32(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 24) & 0xff);
	bsW(s, 8, (u >> 16) & 0xff);
	bsW(s, 8, (u >> 8) & 0xff);
	bsW(s, 8, u & 0xff);
}
/*---------------------------------------------------*/
static
void bsPutU16(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 8) & 0xff);
	bsW(s, 8, u & 0xff);
}
/*---------------------------------------------------*/
/*--- The back end proper                         ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void makeMaps_e(EState* s)
{
	int i;
	s->nInUse = 0;
	for (i = 0; i < 256; i++) {
		if (s->inUse[i]) {
			s->unseqToSeq[i] = s->nInUse;
			s->nInUse++;
		}
	}
}
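
/*
 * Example: if only 'a' (97) and 'c' (99) occur in the block,
 * makeMaps_e() sets nInUse = 2, unseqToSeq[97] = 0, unseqToSeq[99] = 1,
 * i.e. the used byte values are renumbered densely from 0.
 */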
/*---------------------------------------------------*/
static NOINLINE
void generateMTFValues(EState* s)
{
	uint8_t yy[256];
	int32_t i, j;
	int32_t zPend;
	int32_t wr;
	int32_t EOB;

	/*
	 * After sorting (eg, here),
	 * s->arr1[0 .. s->nblock-1] holds sorted order,
	 * and
	 * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
	 * holds the original block data.
	 *
	 * The first thing to do is generate the MTF values,
	 * and put them in
	 * ((uint16_t*)s->arr1)[0 .. s->nblock-1].
	 * Because there are strictly fewer or equal MTF values
	 * than block values, ptr values in this area are overwritten
	 * with MTF values only when they are no longer needed.
	 *
	 * The final compressed bitstream is generated into the
	 * area starting at
	 * &((uint8_t*)s->arr2)[s->nblock]
	 *
	 * These storage aliases are set up in bzCompressInit(),
	 * except for the last one, which is arranged in
	 * compressBlock().
	 */
	uint32_t* ptr = s->ptr;
	uint8_t* block = s->block;
	uint16_t* mtfv = s->mtfv;

	makeMaps_e(s);
	EOB = s->nInUse + 1;

	for (i = 0; i <= EOB; i++)
		s->mtfFreq[i] = 0;

	wr = 0;
	zPend = 0;
	for (i = 0; i < s->nInUse; i++)
		yy[i] = (uint8_t) i;

	for (i = 0; i < s->nblock; i++) {
		uint8_t ll_i;
		AssertD(wr <= i, "generateMTFValues(1)");
		j = ptr[i] - 1;
		if (j < 0)
			j += s->nblock;
		ll_i = s->unseqToSeq[block[j]];
		AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");

		if (yy[0] == ll_i) {
			zPend++;
		} else {
			if (zPend > 0) {
				zPend--;
				while (1) {
					if (zPend & 1) {
						mtfv[wr] = BZ_RUNB; wr++;
						s->mtfFreq[BZ_RUNB]++;
					} else {
						mtfv[wr] = BZ_RUNA; wr++;
						s->mtfFreq[BZ_RUNA]++;
					}
					if (zPend < 2) break;
					zPend = (uint32_t)(zPend - 2) / 2;
					/* bbox: unsigned div is easier */
				}
				zPend = 0;
			}
			{
				register uint8_t rtmp;
				register uint8_t* ryy_j;
				register uint8_t rll_i;
				rtmp = yy[1];
				yy[1] = yy[0];
				ryy_j = &(yy[1]);
				rll_i = ll_i;
				while (rll_i != rtmp) {
					register uint8_t rtmp2;
					ryy_j++;
					rtmp2 = rtmp;
					rtmp = *ryy_j;
					*ryy_j = rtmp2;
				}
				yy[0] = rtmp;
				j = ryy_j - &(yy[0]);
				mtfv[wr] = j + 1;
				wr++;
				s->mtfFreq[j + 1]++;
			}
		}
	}
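
	/*
	 * Flush any zero-run still pending at the end of the block.
	 * Runs of zeroes are coded in bijective base 2 with digits
	 * RUNA (weight 1) and RUNB (weight 2), least significant digit
	 * first. Worked examples, tracing the loop below: a run of
	 * 1 -> RUNA; 2 -> RUNB; 3 -> RUNA RUNA; 4 -> RUNB RUNA.
	 */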
	if (zPend > 0) {
		zPend--;
		while (1) {
			if (zPend & 1) {
				mtfv[wr] = BZ_RUNB;
				wr++;
				s->mtfFreq[BZ_RUNB]++;
			} else {
				mtfv[wr] = BZ_RUNA;
				wr++;
				s->mtfFreq[BZ_RUNA]++;
			}
			if (zPend < 2)
				break;
			zPend = (uint32_t)(zPend - 2) / 2;
			/* bbox: unsigned div is easier */
		}
		zPend = 0;
	}

	mtfv[wr] = EOB;
	wr++;
	s->mtfFreq[EOB]++;

	s->nMTF = wr;
}
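
/*
 * The MTF output alphabet is therefore {RUNA = 0, RUNB = 1,
 * 2 .. nInUse, EOB = nInUse + 1}: nInUse + 2 symbols in total,
 * matching alphaSize in sendMTFValues() below.
 */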

/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

static NOINLINE
void sendMTFValues(EState* s)
{
	int32_t v, t, i, j, gs, ge, totc, bt, bc, iter;
	int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
	int32_t nGroups, nBytes;

	/*
	 * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * is a global since the decoder also needs it.
	 *
	 * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * are also globals only used in this proc.
	 * Made global to keep stack frame size small.
	 */
#define code     sendMTFValues__code
#define rfreq    sendMTFValues__rfreq
#define len_pack sendMTFValues__len_pack

	uint16_t cost[BZ_N_GROUPS];
	int32_t fave[BZ_N_GROUPS];

	uint16_t* mtfv = s->mtfv;

	alphaSize = s->nInUse + 2;
	for (t = 0; t < BZ_N_GROUPS; t++)
		for (v = 0; v < alphaSize; v++)
			s->len[t][v] = BZ_GREATER_ICOST;

	/*--- Decide how many coding tables to use ---*/
	AssertH(s->nMTF > 0, 3001);
	if (s->nMTF < 200)  nGroups = 2; else
	if (s->nMTF < 600)  nGroups = 3; else
	if (s->nMTF < 1200) nGroups = 4; else
	if (s->nMTF < 2400) nGroups = 5; else
	nGroups = 6;

	/*--- Generate an initial set of coding tables ---*/
	{
		int32_t nPart, remF, tFreq, aFreq;

		nPart = nGroups;
		remF = s->nMTF;
		gs = 0;
		while (nPart > 0) {
			tFreq = remF / nPart;
			ge = gs - 1;
			aFreq = 0;
			while (aFreq < tFreq && ge < alphaSize - 1) {
				ge++;
				aFreq += s->mtfFreq[ge];
			}
			if (ge > gs
			 && nPart != nGroups && nPart != 1
			 && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
			) {
				aFreq -= s->mtfFreq[ge];
				ge--;
			}

			for (v = 0; v < alphaSize; v++)
				if (v >= gs && v <= ge)
					s->len[nPart-1][v] = BZ_LESSER_ICOST;
				else
					s->len[nPart-1][v] = BZ_GREATER_ICOST;

			nPart--;
			gs = ge + 1;
			remF -= aFreq;
		}
	}
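
	/*
	 * Each table starts as a crude classifier: symbols in its slice of
	 * the alphabet (slices chosen so each covers roughly nMTF/nGroups
	 * symbol occurrences) get pseudo-cost 0, all others cost 15. The
	 * refinement passes below then re-estimate real code lengths from
	 * the frequencies each table actually attracts.
	 */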
	/*
	 * Iterate up to BZ_N_ITERS times to improve the tables.
	 */
	for (iter = 0; iter < BZ_N_ITERS; iter++) {
		for (t = 0; t < nGroups; t++)
			fave[t] = 0;

		for (t = 0; t < nGroups; t++)
			for (v = 0; v < alphaSize; v++)
				s->rfreq[t][v] = 0;

#if CONFIG_BZIP2_FEATURE_SPEED >= 5
		/*
		 * Set up an auxiliary length table which is used to fast-track
		 * the common case (nGroups == 6).
		 */
		if (nGroups == 6) {
			for (v = 0; v < alphaSize; v++) {
				s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
				s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
				s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
			}
		}
#endif
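
		/*
		 * len_pack[v][k] packs two 16-bit code lengths per 32-bit word,
		 * so the six per-group costs can be accumulated with three adds
		 * per symbol. No carry can cross the halfword boundary: a group
		 * has at most 50 symbols and lengths stay <= 17, so each partial
		 * cost is bounded by 850, well under 65536.
		 */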
		nSelectors = 0;
		totc = 0;
		gs = 0;
		while (1) {
			/*--- Set group start & end marks. --*/
			if (gs >= s->nMTF)
				break;
			ge = gs + BZ_G_SIZE - 1;
			if (ge >= s->nMTF)
				ge = s->nMTF - 1;

			/*
			 * Calculate the cost of this group as coded
			 * by each of the coding tables.
			 */
			for (t = 0; t < nGroups; t++)
				cost[t] = 0;
#if CONFIG_BZIP2_FEATURE_SPEED >= 5
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
				register uint32_t cost01, cost23, cost45;
				register uint16_t icv;
				cost01 = cost23 = cost45 = 0;
#define BZ_ITER(nn) \
	icv = mtfv[gs+(nn)]; \
	cost01 += s->len_pack[icv][0]; \
	cost23 += s->len_pack[icv][1]; \
	cost45 += s->len_pack[icv][2];
				BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
				BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
				BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
				BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
				BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
				BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
				BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
				BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
				BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
				BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
#undef BZ_ITER
				cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
				cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
				cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				for (i = gs; i <= ge; i++) {
					uint16_t icv = mtfv[i];
					for (t = 0; t < nGroups; t++)
						cost[t] += s->len[t][icv];
				}
			}

			/*
			 * Find the coding table which is best for this group,
			 * and record its identity in the selector table.
			 */
			/*bc = 999999999;*/
			/*bt = -1;*/
			bc = cost[0];
			bt = 0;
			for (t = 1 /*0*/; t < nGroups; t++) {
				if (cost[t] < bc) {
					bc = cost[t];
					bt = t;
				}
			}
			totc += bc;
			fave[bt]++;
			s->selector[nSelectors] = bt;
			nSelectors++;

			/*
			 * Increment the symbol frequencies for the selected table.
			 */
/* 1% faster compress. +800 bytes */
#if CONFIG_BZIP2_FEATURE_SPEED >= 4
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
				BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
				BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
				BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
				BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
				BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
				BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
				BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
				BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
				BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
				BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
#undef BZ_ITUR
				gs = ge + 1;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				while (gs <= ge) {
					s->rfreq[bt][mtfv[gs]]++;
					gs++;
				}
				/* already is: gs = ge + 1; */
			}
		}

		/*
		 * Recompute the tables based on the accumulated frequencies.
		 */
		/* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
		 * comment in huffman.c for details. */
		for (t = 0; t < nGroups; t++)
			BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
	}

	AssertH(nGroups < 8, 3002);
	AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);

	/*--- Compute MTF values for the selectors. ---*/
	{
		uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;

		for (i = 0; i < nGroups; i++)
			pos[i] = i;
		for (i = 0; i < nSelectors; i++) {
			ll_i = s->selector[i];
			j = 0;
			tmp = pos[j];
			while (ll_i != tmp) {
				j++;
				tmp2 = tmp;
				tmp = pos[j];
				pos[j] = tmp2;
			}
			pos[0] = tmp;
			s->selectorMtf[i] = j;
		}
	}
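
	/*
	 * Worked example: with two tables and selectors 1, 1, 0, the MTF
	 * list starts as [0, 1]; the outputs are 1 (move 1 to front),
	 * then 0 (already in front), then 1 -- i.e. selectorMtf = 1, 0, 1.
	 */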
	/*--- Assign actual codes for the tables. --*/
	for (t = 0; t < nGroups; t++) {
		minLen = 32;
		maxLen = 0;
		for (i = 0; i < alphaSize; i++) {
			if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
			if (s->len[t][i] < minLen) minLen = s->len[t][i];
		}
		AssertH(!(maxLen > 17 /*20*/), 3004);
		AssertH(!(minLen < 1), 3005);
		BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
	}

	/*--- Transmit the mapping table. ---*/
	{
		/* bbox: optimized a bit more than in bzip2 */
		int inUse16 = 0;
		for (i = 0; i < 16; i++) {
			if (sizeof(long) <= 4) {
				inUse16 = inUse16*2 +
					((*(uint32_t*)&(s->inUse[i * 16 + 0])
					| *(uint32_t*)&(s->inUse[i * 16 + 4])
					| *(uint32_t*)&(s->inUse[i * 16 + 8])
					| *(uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
			} else { /* Our CPU can do better */
				inUse16 = inUse16*2 +
					((*(uint64_t*)&(s->inUse[i * 16 + 0])
					| *(uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
			}
		}

		nBytes = s->numZ;
		bsW(s, 16, inUse16);

		inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
		for (i = 0; i < 16; i++) {
			if (inUse16 < 0) {
				unsigned v16 = 0;
				for (j = 0; j < 16; j++)
					v16 = v16*2 + s->inUse[i * 16 + j];
				bsW(s, 16, v16);
			}
			inUse16 <<= 1;
		}
	}
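
	/*
	 * The mapping table is two-level: one 16-bit coarse map with a bit
	 * per 16-byte chunk of inUse[], then a 16-bit fine map for each
	 * chunk that contains at least one used byte value. (The word-wide
	 * loads above merely test chunks for "any nonzero byte" in bulk;
	 * they rely on inUse[] being suitably aligned.)
	 */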
	/*--- Now the selectors. ---*/
	nBytes = s->numZ;
	bsW(s, 3, nGroups);
	bsW(s, 15, nSelectors);
	for (i = 0; i < nSelectors; i++) {
		for (j = 0; j < s->selectorMtf[i]; j++)
			bsW(s, 1, 1);
		bsW(s, 1, 0);
	}
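	/* Each selectorMtf value j is coded in unary: j 1-bits, then a 0. */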

	/*--- Now the coding tables. ---*/
	nBytes = s->numZ;

	for (t = 0; t < nGroups; t++) {
		int32_t curr = s->len[t][0];
		bsW(s, 5, curr);
		for (i = 0; i < alphaSize; i++) {
			while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ };
			while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ };
			bsW(s, 1, 0);
		}
	}
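
	/*
	 * Code lengths are delta-coded: a 5-bit starting length, then for
	 * each symbol a run of "10" (increment) or "11" (decrement) pairs
	 * followed by a single 0 bit once the target length is reached.
	 */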
	/*--- And finally, the block data proper ---*/
	nBytes = s->numZ;
	selCtr = 0;
	gs = 0;
	while (1) {
		if (gs >= s->nMTF)
			break;
		ge = gs + BZ_G_SIZE - 1;
		if (ge >= s->nMTF)
			ge = s->nMTF - 1;
		AssertH(s->selector[selCtr] < nGroups, 3006);

/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
#if 0
		if (nGroups == 6 && 50 == ge-gs+1) {
			/*--- fast track the common case ---*/
			uint16_t mtfv_i;
			uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
#define BZ_ITAH(nn) \
	mtfv_i = mtfv[gs+(nn)]; \
	bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
			BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
			BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
			BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
			BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
			BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
			BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
			BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
			BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
			BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
			BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
#undef BZ_ITAH
			gs = ge + 1;
		} else
#endif
		{
			/*--- slow version which correctly handles all situations ---*/
			/* code is a bit bigger, but moves the multiply out of the loop */
			uint8_t* s_len_sel_selCtr = &(s->len [s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
			while (gs <= ge) {
				bsW(s,
					s_len_sel_selCtr[mtfv[gs]],
					s_code_sel_selCtr[mtfv[gs]]
				);
				gs++;
			}
			/* already is: gs = ge+1; */
		}
		selCtr++;
	}
	AssertH(selCtr == nSelectors, 3007);
#undef code
#undef rfreq
#undef len_pack
}

/*---------------------------------------------------*/
static
void BZ2_compressBlock(EState* s, int is_last_block)
{
	if (s->nblock > 0) {
		BZ_FINALISE_CRC(s->blockCRC);
		s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
		s->combinedCRC ^= s->blockCRC;
		if (s->blockNo > 1)
			s->numZ = 0;

		BZ2_blockSort(s);
	}

	s->zbits = &((uint8_t*)s->arr2)[s->nblock];
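	/*
	 * The compressed bitstream is written into arr2 immediately after
	 * the nblock input bytes; this is the last of the storage aliases
	 * mentioned in generateMTFValues() above.
	 */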

	/*-- If this is the first block, create the stream header. --*/
	if (s->blockNo == 1) {
		BZ2_bsInitWrite(s);
		/*bsPutU8(s, BZ_HDR_B);*/
		/*bsPutU8(s, BZ_HDR_Z);*/
		/*bsPutU8(s, BZ_HDR_h);*/
		/*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
		bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
	}

	if (s->nblock > 0) {
		/*bsPutU8(s, 0x31);*/
		/*bsPutU8(s, 0x41);*/
		/*bsPutU8(s, 0x59);*/
		/*bsPutU8(s, 0x26);*/
		bsPutU32(s, 0x31415926);
		/*bsPutU8(s, 0x53);*/
		/*bsPutU8(s, 0x59);*/
		bsPutU16(s, 0x5359);
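		/* 0x314159265359: the 48-bit block magic, the digits of pi. */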

		/*-- Now the block's CRC, so it is in a known place. --*/
		bsPutU32(s, s->blockCRC);

		/*
		 * Now a single bit indicating (non-)randomisation.
		 * As of version 0.9.5, we use a better sorting algorithm
		 * which makes randomisation unnecessary. So always set
		 * the randomised bit to 'no'. Of course, the decoder
		 * still needs to be able to handle randomised blocks
		 * so as to maintain backwards compatibility with
		 * older versions of bzip2.
		 */
		bsW(s, 1, 0);

		bsW(s, 24, s->origPtr);
		generateMTFValues(s);
		sendMTFValues(s);
	}

	/*-- If this is the last block, add the stream trailer. --*/
	if (is_last_block) {
		/*bsPutU8(s, 0x17);*/
		/*bsPutU8(s, 0x72);*/
		/*bsPutU8(s, 0x45);*/
		/*bsPutU8(s, 0x38);*/
		bsPutU32(s, 0x17724538);
		/*bsPutU8(s, 0x50);*/
		/*bsPutU8(s, 0x90);*/
		bsPutU16(s, 0x5090);
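		/* 0x177245385090: the end-of-stream magic, the digits of sqrt(pi). */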
		bsPutU32(s, s->combinedCRC);
		bsFinishWrite(s);
	}
}

/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/