/*
 * This is a collection of several routines from gzip-1.0.3
 *
 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
 * puts by Nick Holloway 1993, better puts by Martin Mares 1995
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
#include "../string.h"
/*
 * This code is compiled with -fPIC and it is relocated dynamically
 * at run time, but no relocation processing is performed.
 * This means that it is not safe to place pointers in static structures.
 */
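/*
 * Illustrative example of the constraint above (hypothetical code, not
 * part of this file; message_buf is a made-up name): a static
 * initializer such as
 *
 *	static char *msg = message_buf;
 *
 * bakes a link-time address into the image and is never fixed up after
 * the blob is moved, whereas assigning msg from inside a function at
 * run time picks up the relocated address and is safe.
 */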
/*
 * Getting to provably safe in-place decompression is hard. Worst case
 * behaviours need to be analyzed. Background information:
 *
 * The gzip file layout is a 10 byte header (magic[2], method[1],
 * flags[1], timestamp[4], extraflags[1], os[1]), followed by the
 * compressed data blocks[N] and a trailer of crc[4] and orig_len[4],
 * resulting in 18 bytes of non-compressed data overhead.
 *
 * Files are divided into blocks, each introduced by 1 bit (last block
 * flag) and 2 bits (block type).
 *
 * A new block occurs every 32K - 1 bytes, or when 50% compression has
 * been achieved. The smallest block type encoding is always used.
 *
 * A stored block carries a 32 bit length in bytes; a dynamic block
 * carries a dynamic tree encoding.
 *
 * The buffer for decompression in place is the length of the
 * uncompressed data, plus a small amount extra to keep the algorithm safe.
 * The compressed data is placed at the end of the buffer. The output
 * pointer is placed at the start of the buffer and the input pointer
 * is placed where the compressed data starts. Problems will occur
 * when the output pointer overruns the input pointer.
 *
 * The output pointer can only overrun the input pointer if the input
 * pointer is moving faster than the output pointer, a condition only
 * triggered by data whose compressed form is larger than the
 * uncompressed form.
 *
 * The worst case at the block level is a growth of the compressed data
 * of 5 bytes per 32767 bytes.
 *
 * The worst case internal to a compressed block is very hard to figure.
 * The worst case can at least be bounded by having one bit that represents
 * 32764 bytes and then all of the rest of the bytes representing the very
 * last byte.
 *
 * All of which is enough to compute an amount of extra data that is required
 * to be safe. To avoid problems at the block level, allocating 5 extra bytes
 * per 32767 bytes of data is sufficient. To avoid problems internal to a
 * block, adding an extra 32767 bytes (the worst case uncompressed block size)
 * is sufficient, to ensure that in the worst case the decompressed data for
 * a block will stop the byte before the compressed data for that block
 * begins. To avoid problems with the compressed data's meta information an
 * extra 18 bytes are needed. Leading to the formula:
 *
 * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
 *
 * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
 * Adding 32768 instead of 32767 just makes for round numbers.
 * Adding the decompressor_size is necessary as it must live after all
 * of the data as well. Last I measured the decompressor is about 14K:
 * 10K of actual data and 4K of bss.
 */
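/*
 * Worked example of the formula above (illustrative numbers only, not
 * taken from a real build): a 20 MiB (20971520 byte) uncompressed
 * kernel and a ~14K (14336 byte) decompressor give
 *
 *	extra_bytes = (20971520 >> 12) + 32768 + 18 + 14336
 *		    = 5120 + 32768 + 18 + 14336
 *		    = 52242 bytes of slack (roughly 51 KiB).
 */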
/*
 * Use a normal definition of memset() from string.c. There are already
 * included header files which expect a definition of memset(), and by
 * the time we define the memset macro, it is too late.
 */
#define memzero(s, n)	memset((s), 0, (n))
static void error(char *m);
/* This is set up by the setup-routine at boot-time */
struct boot_params *real_mode;		/* Pointer to real-mode data */
memptr free_mem_ptr;
memptr free_mem_end_ptr;

static char *vidmem;
static int vidport;
static int lines, cols;
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif

#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif

#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif

#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif

#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif

#ifdef CONFIG_KERNEL_LZ4
#include "../../../../lib/decompress_unlz4.c"
#endif
static void scroll(void)
{
	int i;

	memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
	for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
		vidmem[i] = ' ';
}
#define XMTRDY	0x20	/* Transmitter ready bit in LSR */

#define TXR	0	/* Transmit register (WRITE) */
#define LSR	5	/* Line Status */
static void serial_putchar(int ch)
{
	unsigned timeout = 0xffff;

	while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
		cpu_relax();

	outb(ch, early_serial_base + TXR);
}
void __putstr(const char *s)
{
	int x, y, pos;
	char c;

	if (early_serial_base) {
		const char *str = s;

		while (*str) {
			if (*str == '\n')
				serial_putchar('\r');
			serial_putchar(*str++);
		}
	}
	if (real_mode->screen_info.orig_video_mode == 0 &&
	    lines == 0 && cols == 0)
		return;
	x = real_mode->screen_info.orig_x;
	y = real_mode->screen_info.orig_y;
	while ((c = *s++) != '\0') {
			vidmem[(x + cols * y) * 2] = c;
	real_mode->screen_info.orig_x = x;
	real_mode->screen_info.orig_y = y;
	pos = (x + cols * y) * 2;	/* Update cursor position */
	outb(14, vidport);
	outb(0xff & (pos >> 9), vidport+1);
	outb(15, vidport);
	outb(0xff & (pos >> 1), vidport+1);
}
void __puthex(unsigned long value)
{
	char alpha[2] = "0";
	int bits;

	for (bits = sizeof(value) * 8 - 4; bits >= 0; bits -= 4) {
		unsigned long digit = (value >> bits) & 0xf;

		if (digit < 0xA)
			alpha[0] = '0' + digit;
		else
			alpha[0] = 'a' + (digit - 0xA);

		__putstr(alpha);
	}
}
static void error(char *x)
{
	error_putstr("\n\n");
	error_putstr(x);
	error_putstr("\n\n -- System halted");
#if CONFIG_X86_NEED_RELOCS
static void handle_relocations(void *output, unsigned long output_len)
{
	int *reloc;
	unsigned long delta, map, ptr;
	unsigned long min_addr = (unsigned long)output;
	unsigned long max_addr = min_addr + output_len;
	/*
	 * Calculate the delta between where vmlinux was linked to load
	 * and where it was actually loaded.
	 */
	delta = min_addr - LOAD_PHYSICAL_ADDR;
	if (!delta) {
		debug_putstr("No relocation needed... ");
		return;
	}
	debug_putstr("Performing relocations... ");
	/*
	 * The kernel contains a table of relocation addresses. Those
	 * addresses have the final load address of the kernel in virtual
	 * memory. We are currently working in the self map. So we need to
	 * create an adjustment for kernel memory addresses to the self map.
	 * This will involve subtracting out the base address of the kernel.
	 */
	map = delta - __START_KERNEL_map;
	/*
	 * Process relocations: 32 bit relocations first then 64 bit after.
	 * Three sets of binary relocations are added to the end of the kernel
	 * before compression. Each relocation table entry is the kernel
	 * address of the location which needs to be updated stored as a
	 * 32-bit value which is sign extended to 64 bits.
	 *
	 * Format is:
	 *
	 * 0 - zero terminator for 64 bit relocations
	 * 64 bit relocation repeated
	 * 0 - zero terminator for inverse 32 bit relocations
	 * 32 bit inverse relocation repeated
	 * 0 - zero terminator for 32 bit relocations
	 * 32 bit relocation repeated
	 *
	 * So we work backwards from the end of the decompressed image.
	 */
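	/*
	 * Illustrative walk-through with made-up values (not from a real
	 * build): assume __START_KERNEL_map = 0xffffffff80000000, the
	 * kernel linked at LOAD_PHYSICAL_ADDR = 0x1000000 but loaded at
	 * 0x2000000, so delta = 0x1000000. A 32-bit table entry of
	 * 0x81234567 sign extends to the virtual address
	 * 0xffffffff81234567; adding map = delta - __START_KERNEL_map
	 * turns it into 0x2234567, a location inside the loaded image,
	 * and the 32-bit value stored there is then increased by delta.
	 */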
	for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) {
		int extended = *reloc;
		extended += map;

		ptr = (unsigned long)extended;
		if (ptr < min_addr || ptr > max_addr)
			error("32-bit relocation outside of kernel!\n");

		*(uint32_t *)ptr += delta;
	}
#ifdef CONFIG_X86_64
	while (*--reloc) {
		long extended = *reloc;
		extended += map;

		ptr = (unsigned long)extended;
		if (ptr < min_addr || ptr > max_addr)
			error("inverse 32-bit relocation outside of kernel!\n");

		*(int32_t *)ptr -= delta;
	}
	for (reloc--; *reloc; reloc--) {
		long extended = *reloc;
		extended += map;

		ptr = (unsigned long)extended;
		if (ptr < min_addr || ptr > max_addr)
			error("64-bit relocation outside of kernel!\n");

		*(uint64_t *)ptr += delta;
	}
#endif
}
#else
static inline void handle_relocations(void *output, unsigned long output_len)
{ }
#endif
static void parse_elf(void *output)
{
#ifdef CONFIG_X86_64
	Elf64_Ehdr ehdr;
	Elf64_Phdr *phdrs, *phdr;
#else
	Elf32_Ehdr ehdr;
	Elf32_Phdr *phdrs, *phdr;
#endif
	void *dest;
	int i;
	memcpy(&ehdr, output, sizeof(ehdr));
	if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
	    ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
	    ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
	    ehdr.e_ident[EI_MAG3] != ELFMAG3) {
		error("Kernel is not a valid ELF file");
		return;
	}
	debug_putstr("Parsing ELF... ");
	phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
	if (!phdrs)
		error("Failed to allocate space for phdrs");
	memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);
	for (i = 0; i < ehdr.e_phnum; i++) {
		phdr = &phdrs[i];

		switch (phdr->p_type) {
		case PT_LOAD:
#ifdef CONFIG_RELOCATABLE
			dest = output;
			dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
#else
			dest = (void *)(phdr->p_paddr);
#endif
			memcpy(dest,
			       output + phdr->p_offset,
			       phdr->p_filesz);
			break;
		default: /* Ignore other PT_* */ break;
		}
	}
asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
					     unsigned char *input_data,
					     unsigned long input_len,
					     unsigned char *output,
					     unsigned long output_len,
					     unsigned long run_size)
{
	unsigned char *output_orig = output;

	real_mode = rmode;
	/* Clear it for solely in-kernel use */
	real_mode->hdr.loadflags &= ~KASLR_FLAG;
	sanitize_boot_params(real_mode);
	if (real_mode->screen_info.orig_video_mode == 7) {
		vidmem = (char *) 0xb0000;
		vidport = 0x3b4;
	} else {
		vidmem = (char *) 0xb8000;
		vidport = 0x3d4;
	}
	lines = real_mode->screen_info.orig_video_lines;
	cols = real_mode->screen_info.orig_video_cols;
	debug_putstr("early console in decompress_kernel\n");
	free_mem_ptr     = heap;	/* Heap */
	free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
	/* Report initial kernel position details. */
	debug_putaddr(input_data);
	debug_putaddr(input_len);
	debug_putaddr(output);
	debug_putaddr(output_len);
	debug_putaddr(run_size);
	/*
	 * The memory hole needed for the kernel is the larger of either
	 * the entire decompressed kernel plus relocation table, or the
	 * entire decompressed kernel plus .bss and .brk sections.
	 */
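	/*
	 * Illustrative example with made-up sizes: if the decompressed
	 * image plus relocation table (output_len) is 24 MiB but the
	 * running kernel needs 30 MiB once .bss and .brk are included
	 * (run_size), the 30 MiB figure is what must fit at the chosen
	 * location, which is why the larger of the two is passed below.
	 */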
	output = choose_kernel_location(real_mode, input_data, input_len, output,
					output_len > run_size ? output_len
							      : run_size);
	/* Validate memory location choices. */
	if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
		error("Destination address inappropriately aligned");
#ifdef CONFIG_X86_64
	if (heap > 0x3fffffffffffUL)
		error("Destination address too large");
#else
	if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
		error("Destination address too large");
#endif
#ifndef CONFIG_RELOCATABLE
	if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
		error("Wrong destination address");
#endif
	debug_putstr("\nDecompressing Linux... ");
	__decompress(input_data, input_len, NULL, NULL, output, output_len,
		     NULL, error);
	parse_elf(output);
	/*
	 * 32-bit always performs relocations. 64-bit relocations are only
	 * needed if kASLR has chosen a different load address.
	 */
	if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
		handle_relocations(output, output_len);
	debug_putstr("done.\nBooting the kernel.\n");
	return output;
}