com32/modules/mboot.c
/*
 * mboot.c
 *
 * Loader for Multiboot-compliant kernels and modules.
 *
 * Copyright (C) 2005 Tim Deegan <Tim.Deegan@cl.cam.ac.uk>
 * Parts based on GNU GRUB, Copyright (C) 2000 Free Software Foundation, Inc.
 * Parts based on SYSLINUX, Copyright (C) 1994-2005 H. Peter Anvin.
 * Thanks to Ram Yalamanchili for the ELF section-header loading.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <consoles.h>
#include <zlib.h>
#include <com32.h>

#include "i386-elf.h"
#include "mb_info.h"
#include "mb_header.h"

#include <klibc/compiler.h>     /* For __constructor */
#define MIN(_x, _y) (((_x)<(_y))?(_x):(_y))
#define MAX(_x, _y) (((_x)>(_y))?(_x):(_y))
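/* NB: as usual for function-style macros, MIN and MAX evaluate their
 * arguments more than once. */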
/* Define this for some more printout */
#undef DEBUG

/* Memory magic numbers */
#define STACK_SIZE      0x20000     /* XXX Could be much smaller */
#define MALLOC_SIZE     0x100000    /* XXX Could be much smaller */
#define MIN_RUN_ADDR    0x10000     /* Lowest address we'll consider using */
#define MEM_HOLE_START  0xa0000     /* Memory hole runs from 640k ... */
#define MEM_HOLE_END    0x100000    /* ... to 1MB */
#define X86_PAGE_SIZE   0x1000
size_t __stack_size = STACK_SIZE;   /* How much stack we'll use */
extern void *__mem_end;             /* Start of malloc() heap */
extern char _end[];                 /* End of static data */

/* Pointer to free memory for loading into: load area is between here
 * and section_addr */
static char *next_load_addr;

/* Memory map for run-time */
typedef struct section section_t;
struct section {
    size_t dest;                    /* Start of run-time allocation */
    char *src;                      /* Current location of data for memmove(),
                                     * or NULL for bzero() */
    size_t size;                    /* Length of allocation */
};
static char *section_addr;
static int section_count;

static size_t max_run_addr;         /* Highest address we'll consider using */
static size_t next_mod_run_addr;    /* Where the next module will be put */

/* File loads are in units of this much */
#define LOAD_CHUNK 0x20000
/* Layout of the input to the 32-bit lidt instruction */
struct lidt_operand {
    unsigned int limit:16;
    unsigned int base:32;
} __attribute__((packed));
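/* LIDT takes a 6-byte memory operand: a 16-bit table limit followed by a
 * 32-bit linear base address, which is why the struct above is packed. */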
/* Magic strings */
static const char version_string[] = "COM32 Multiboot loader v0.2";
static const char copyright_string[] = "Copyright (C) 2005-2006 Tim Deegan.";
static const char module_separator[] = "---";
/*
 * Start of day magic, run from __start during library init.
 */
static void __constructor check_version(void)
/* Check the SYSLINUX version.  Docs say we should be OK from v2.08,
 * but in fact we crash on anything below v2.12 (when libc came in). */
{
    com32sys_t regs_in, regs_out;
    const char *p, *too_old = "Fatal: SYSLINUX image is too old; "
                              "mboot.c32 needs at least version 2.12.\r\n";

    memset(&regs_in, 0, sizeof(regs_in));
    regs_in.eax.l = 0x0001;                 /* "Get version" */
    __intcall(0x22, &regs_in, &regs_out);
    if (regs_out.ecx.w[0] >= 0x020c) return;    /* 0x020c == v2.12 */

    /* Pointless: on older versions this print fails too. :( */
    for (p = too_old ; *p ; p++) {
        memset(&regs_in, 0, sizeof(regs_in));
        regs_in.eax.b[1] = 0x02;            /* "Write character" */
        regs_in.edx.b[0] = *p;
        __intcall(0x21, &regs_in, &regs_out);
    }

    __intcall(0x20, &regs_in, &regs_out);   /* "Terminate program" */
}
static void __constructor grab_memory(void)
/* Runs before init_memory_arena() (com32/lib/malloc.c) to let
 * the malloc() code know how much space it's allowed to use.
 * We don't use malloc() directly, but some of the library code
 * does (zlib, for example). */
{
    /* Find the stack pointer */
    register char * sp;
    asm volatile("movl %%esp, %0" : "=r" (sp));

    /* Initialize the allocation of *run-time* memory: don't let ourselves
     * overwrite the stack during the relocation later. */
    max_run_addr = (size_t) sp - (MALLOC_SIZE + STACK_SIZE);

    /* Move the end-of-memory marker: malloc() will use only memory
     * above __mem_end and below the stack.  We will load files starting
     * at the old __mem_end and working towards the new one, and allocate
     * section descriptors at the top of that area, working down. */
    next_load_addr = __mem_end;
    section_addr = sp - (MALLOC_SIZE + STACK_SIZE);
    section_count = 0;

    /* But be careful not to move it the wrong direction if memory is
     * tight.  Instead we'll fail more gracefully later, when we try to
     * load a file and find that next_load_addr > section_addr. */
    __mem_end = MAX(section_addr, next_load_addr);
}
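/* Resulting picture while we load, from low to high addresses: static data up
 * to the old __mem_end, then the file-load area growing upwards from
 * next_load_addr, then section descriptors growing downwards from
 * section_addr, then the malloc() arena, and finally the stack. */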
/*
 * Run-time memory map functions: allocating and recording allocations.
 */
static int cmp_sections(const void *a, const void *b)
/* For sorting section descriptors by destination address */
{
    const section_t *sa = a;
    const section_t *sb = b;
    if (sa->dest < sb->dest) return -1;
    if (sa->dest > sb->dest) return 1;
    return 0;
}
static void add_section(size_t dest, char *src, size_t size)
/* Adds something to the list of sections to relocate. */
{
    section_t *sec;

#ifdef DEBUG
    printf("SECTION: %#8.8x --> %#8.8x (%#x)\n", (size_t) src, dest, size);
#endif

    section_addr -= sizeof (section_t);
    if (section_addr < next_load_addr) {
        printf("Fatal: out of memory allocating section descriptor.\n");
        exit(1);
    }
    sec = (section_t *) section_addr;
    section_count++;

    sec->src = src;
    sec->dest = dest;
    sec->size = size;

    /* Keep the list sorted */
    qsort(sec, section_count, sizeof (section_t), cmp_sections);
}
static size_t place_low_section(size_t size, size_t align)
/* Find a space in the run-time memory map, below 640K */
{
    int i;
    size_t start;
    section_t *sections = (section_t *) section_addr;

    start = MIN_RUN_ADDR;
    start = (start + (align-1)) & ~(align-1);
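    /* (The expression above rounds start up to the next multiple of align;
     * it assumes align is a power of two, which holds for every caller in
     * this file.) */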
    /* Section list is sorted by destination, so can do this in one pass */
    for (i = 0; i < section_count; i++) {
        if (sections[i].dest < start + size) {
            /* Hit the bottom of this section */
            start = sections[i].dest + sections[i].size;
            start = (start + (align-1)) & ~(align-1);
        }
    }

    if (start + size < MEM_HOLE_START) return start;
    else return 0;
}
static size_t place_module_section(size_t size, size_t align)
/* Find a space in the run-time memory map for this module. */
{
    /* Ideally we'd run through the sections looking for a free space
     * like place_low_section() does, but some OSes (Xen, at least)
     * assume that the bootloader has loaded all the modules
     * consecutively, above the kernel.  So, what we actually do is
     * keep a pointer to the highest address allocated so far, and
     * always allocate modules there. */

    size_t start = next_mod_run_addr;
    start = (start + (align-1)) & ~(align-1);

    if (start + size > max_run_addr) return 0;

    next_mod_run_addr = start + size;
    return start;
}
static void place_kernel_section(size_t start, size_t size)
/* Allocate run-time space for part of the kernel, checking for
 * sanity.  We assume the kernel isn't broken enough to have
 * overlapping segments. */
{
    /* We always place modules above the kernel */
    next_mod_run_addr = MAX(next_mod_run_addr, start + size);

    if (start > max_run_addr || start + size > max_run_addr) {
        /* Overruns the end of memory */
        printf("Fatal: kernel loads too high (%#8.8x+%#x > %#8.8x).\n",
               start, size, max_run_addr);
        exit(1);
    }
    if (start >= MEM_HOLE_END) {
        /* Above the memory hole: easy */
#ifdef DEBUG
        printf("Placed kernel section (%#8.8x+%#x)\n", start, size);
#endif
        return;
    }
    if (start >= MEM_HOLE_START) {
        /* In the memory hole.  Not so good */
        printf("Fatal: kernel load address (%#8.8x) is in the memory hole.\n",
               start);
        exit(1);
    }
    if (start + size > MEM_HOLE_START) {
        /* Too big for low memory */
        printf("Fatal: kernel (%#8.8x+%#x) runs into the memory hole.\n",
               start, size);
        exit(1);
    }
    if (start < MIN_RUN_ADDR) {
        /* Loads too low */
        printf("Fatal: kernel load address (%#8.8x) is too low (<%#8.8x).\n",
               start, MIN_RUN_ADDR);
        exit(1);
    }
    /* Kernel loads below the memory hole: OK */
#ifdef DEBUG
    printf("Placed kernel section (%#8.8x+%#x)\n", start, size);
#endif
}
static void reorder_sections(void)
/* Reorders sections into a safe order, where no relocation
 * overwrites the source of a later one. */
{
    section_t *secs = (section_t *) section_addr;
    section_t tmp;
    int i, j, tries;

#ifdef DEBUG
    printf("Relocations:\n");
    for (i = 0; i < section_count ; i++) {
        printf("    %#8.8x --> %#8.8x (%#x)\n",
               (size_t)secs[i].src, secs[i].dest, secs[i].size);
    }
#endif

    for (i = 0; i < section_count; i++) {
        tries = 0;
    scan_again:
        for (j = i + 1 ; j < section_count; j++) {
            if (secs[j].src != NULL
                && secs[i].dest + secs[i].size > (size_t) secs[j].src
                && secs[i].dest < (size_t) secs[j].src + secs[j].size) {
                /* Would overwrite the source of the later move */
                if (++tries > section_count) {
                    /* Deadlock! */
                    /* XXX Try to break deadlocks? */
                    printf("Fatal: circular dependence in relocations.\n");
                    exit(1);
                }
                /* Swap these sections (using struct copies) */
                tmp = secs[i]; secs[i] = secs[j]; secs[j] = tmp;
                /* Start scanning again from the new secs[i]... */
                goto scan_again;
            }
        }
    }

#ifdef DEBUG
    printf("Relocations:\n");
    for (i = 0; i < section_count ; i++) {
        printf("    %#8.8x --> %#8.8x (%#x)\n",
               (size_t)secs[i].src, secs[i].dest, secs[i].size);
    }
#endif
}
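/* Note that the loop above allows each slot at most section_count swaps before
 * declaring a circular dependency, so reorder_sections() always terminates. */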
static void init_mmap(struct multiboot_info *mbi)
/* Get a full memory map from the BIOS to pass to the kernel. */
{
    com32sys_t regs_in, regs_out;
    struct AddrRangeDesc *e820;
    int e820_slots;
    size_t mem_lower, mem_upper, run_addr, mmap_size;
    register size_t sp;

    /* Default values for mem_lower and mem_upper in case the BIOS won't
     * tell us: 640K, and all memory up to the stack. */
    asm volatile("movl %%esp, %0" : "=r" (sp));
    mem_upper = (sp - MEM_HOLE_END) / 1024;
    mem_lower = (MEM_HOLE_START) / 1024;

#ifdef DEBUG
    printf("Requesting memory map from BIOS:\n");
#endif

    /* Ask the BIOS for the full memory map of the machine.  We'll
     * build it in Multiboot format (i.e. with size fields) in the
     * bounce buffer, and then allocate some high memory to keep it in
     * until boot time. */
    e820 = __com32.cs_bounce;
    e820_slots = 0;
    regs_out.ebx.l = 0;

    while (((void *)(e820 + 1)) < __com32.cs_bounce + __com32.cs_bounce_size)
    {
        memset(e820, 0, sizeof (*e820));
        memset(&regs_in, 0, sizeof regs_in);
        e820->size = sizeof(*e820) - sizeof(e820->size);
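        /* The Multiboot mmap format is the raw E820 descriptor prefixed by a
         * size field that does not count itself, hence the subtraction. */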
        /* Ask the BIOS to fill in this descriptor */
        regs_in.eax.l = 0xe820;         /* "Get system memory map" */
        regs_in.ebx.l = regs_out.ebx.l; /* Continuation value from last call */
        regs_in.ecx.l = 20;             /* Size of buffer to write into */
        regs_in.edx.l = 0x534d4150;     /* "SMAP" */
        regs_in.es = SEG(&e820->BaseAddr);
        regs_in.edi.w[0] = OFFS(&e820->BaseAddr);
        __intcall(0x15, &regs_in, &regs_out);

        if ((regs_out.eflags.l & EFLAGS_CF) != 0 && regs_out.ebx.l != 0)
            break;                      /* End of map */

        if (((regs_out.eflags.l & EFLAGS_CF) != 0 && regs_out.ebx.l == 0)
            || (regs_out.eax.l != 0x534d4150))
        {
            /* Error */
            printf("Error %x reading E820 memory map: %s.\n",
                   (int) regs_out.eax.b[0],
                   (regs_out.eax.b[0] == 0x80) ? "invalid command" :
                   (regs_out.eax.b[0] == 0x86) ? "not supported" :
                   "unknown error");
            break;
        }

        /* Success */
#ifdef DEBUG
        printf("    %#16.16Lx -- %#16.16Lx : ",
               e820->BaseAddr, e820->BaseAddr + e820->Length);
        switch (e820->Type) {
        case 1:  printf("Available\n"); break;
        case 2:  printf("Reserved\n"); break;
        case 3:  printf("ACPI Reclaim\n"); break;
        case 4:  printf("ACPI NVS\n"); break;
        default: printf("? (Reserved)\n"); break;
        }
#endif

        if (e820->Type == 1) {
            if (e820->BaseAddr == 0) {
                mem_lower = MIN(MEM_HOLE_START, e820->Length) / 1024;
            } else if (e820->BaseAddr == MEM_HOLE_END) {
                mem_upper = MIN(0xfff00000, e820->Length) / 1024;
            }
        }

        /* Move to next slot */
        e820++;
        e820_slots++;

        /* Done? */
        if (regs_out.ebx.l == 0)
            break;
    }

    /* Record the simple information in the MBI (in kilobytes, as the
     * Multiboot spec requires) */
    mbi->flags |= MB_INFO_MEMORY;
    mbi->mem_lower = mem_lower;
    mbi->mem_upper = mem_upper;

    /* Record the full memory map in the MBI */
    if (e820_slots != 0) {
        mmap_size = e820_slots * sizeof(*e820);
        /* Where will it live at run time? */
        run_addr = place_low_section(mmap_size, 1);
        if (run_addr == 0) {
            printf("Fatal: can't find space for the e820 mmap.\n");
            exit(1);
        }
        /* Where will it live now? */
        e820 = (struct AddrRangeDesc *) next_load_addr;
        if (next_load_addr + mmap_size > section_addr) {
            printf("Fatal: out of memory storing the e820 mmap.\n");
            exit(1);
        }
        next_load_addr += mmap_size;
        /* Copy it out of the bounce buffer */
        memcpy(e820, __com32.cs_bounce, mmap_size);
        /* Remember to copy it again at run time */
        add_section(run_addr, (char *) e820, mmap_size);
        /* Record it in the MBI */
        mbi->flags |= MB_INFO_MEM_MAP;
        mbi->mmap_length = mmap_size;
        mbi->mmap_addr = run_addr;
    }
}
/*
 * Code for loading and parsing files.
 */
static void load_file(char *filename, char **startp, size_t *sizep)
/* Load a file into memory.  Returns where it is and how big via
 * startp and sizep */
{
    gzFile fp;
    char *start;
    int bsize;

    printf("Loading %s.", filename);

    start = next_load_addr;
    startp[0] = start;
    sizep[0] = 0;

    /* Open the file */
    if ((fp = gzopen(filename, "r")) == NULL) {
        printf("\nFatal: cannot open %s\n", filename);
        exit(1);
    }

    while (next_load_addr + LOAD_CHUNK <= section_addr) {
        bsize = gzread(fp, next_load_addr, LOAD_CHUNK);
        printf("%s",".");

        if (bsize < 0) {
            printf("\nFatal: read error in %s\n", filename);
            gzclose(fp);
            exit(1);
        }

        next_load_addr += bsize;
        sizep[0] += bsize;

        if (bsize < LOAD_CHUNK) {
            printf("%s","\n");
            gzclose(fp);
            return;
        }
    }

    /* Running out of memory.  Try and use up the last bit */
    if (section_addr > next_load_addr) {
        bsize = gzread(fp, next_load_addr, section_addr - next_load_addr);
        printf("%s",".");
    } else {
        bsize = 0;
    }

    if (bsize < 0) {
        gzclose(fp);
        printf("\nFatal: read error in %s\n", filename);
        exit(1);
    }

    next_load_addr += bsize;
    sizep[0] += bsize;

    if (!gzeof(fp)) {
        gzclose(fp);
        printf("\nFatal: out of memory reading %s\n", filename);
        exit(1);
    }

    printf("%s","\n");
    gzclose(fp);
    return;
}
static size_t load_kernel(struct multiboot_info *mbi, char *cmdline)
/* Load a multiboot/elf32 kernel and allocate run-time memory for it.
 * Returns the kernel's entry address. */
{
    unsigned int i;
    char *load_addr;                /* Where the image was loaded */
    size_t load_size;               /* How big it is */
    char *seg_addr;                 /* Where a segment was loaded */
    size_t seg_size, bss_size;      /* How big it is */
    size_t run_addr, run_size;      /* Where it should be put */
    size_t shdr_run_addr;
    char *p;
    Elf32_Ehdr *ehdr;
    Elf32_Phdr *phdr;
    Elf32_Shdr *shdr;
    struct multiboot_header *mbh;

    printf("Kernel: %s\n", cmdline);

    load_addr = 0;
    load_size = 0;
    p = strchr(cmdline, ' ');
    if (p != NULL) *p = 0;
    load_file(cmdline, &load_addr, &load_size);
    if (load_size < 12) {
        printf("Fatal: %s is too short to be a multiboot kernel.\n",
               cmdline);
        exit(1);
    }
    if (p != NULL) *p = ' ';

    /* Look for a multiboot header in the first 8k of the file */
    for (i = 0; i <= MIN(load_size - 12, MULTIBOOT_SEARCH - 12); i += 4)
    {
        mbh = (struct multiboot_header *)(load_addr + i);
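        /* Per the Multiboot spec, a valid header is 32-bit aligned (hence the
         * i += 4 stride) and its checksum field is chosen so that
         * magic + flags + checksum wraps to zero, which is what the
         * 0xffffffff mask below tests. */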
        if (mbh->magic != MULTIBOOT_MAGIC
            || ((mbh->magic + mbh->flags + mbh->checksum) & 0xffffffff))
        {
            /* Not a multiboot header */
            continue;
        }

        if (mbh->flags & (MULTIBOOT_UNSUPPORTED | MULTIBOOT_VIDEO_MODE)) {
            /* Requires options we don't support */
            printf("Fatal: Kernel requires multiboot options "
                   "that I don't support: %#x.\n",
                   mbh->flags & (MULTIBOOT_UNSUPPORTED|MULTIBOOT_VIDEO_MODE));
            exit(1);
        }

        /* This kernel will do: figure out where all the pieces will live */

        if (mbh->flags & MULTIBOOT_AOUT_KLUDGE) {

            /* Use the offsets in the multiboot header */
#ifdef DEBUG
            printf("Using multiboot header.\n");
#endif

            /* Where is the code in the loaded file? */
            seg_addr = ((char *)mbh) - (mbh->header_addr - mbh->load_addr);

            /* How much code is there? */
            run_addr = mbh->load_addr;
            if (mbh->load_end_addr != 0)
                seg_size = mbh->load_end_addr - mbh->load_addr;
            else
                seg_size = load_size - (seg_addr - load_addr);

            /* How much memory will it take up? */
            if (mbh->bss_end_addr != 0)
                run_size = mbh->bss_end_addr - mbh->load_addr;
            else
                run_size = seg_size;

            if (seg_size > run_size) {
                printf("Fatal: can't put %i bytes of kernel into %i bytes "
                       "of memory.\n", seg_size, run_size);
                exit(1);
            }
            if (seg_addr + seg_size > load_addr + load_size) {
                printf("Fatal: multiboot load segment runs off the "
                       "end of the file.\n");
                exit(1);
            }

            /* Does it fit where it wants to be? */
            place_kernel_section(run_addr, run_size);

            /* Put it on the relocation list */
            if (seg_size < run_size) {
                /* Set up the kernel BSS too */
                if (seg_size > 0)
                    add_section(run_addr, seg_addr, seg_size);
                bss_size = run_size - seg_size;
                add_section(run_addr + seg_size, NULL, bss_size);
            } else {
                /* No BSS */
                add_section(run_addr, seg_addr, run_size);
            }

            /* Done. */
            return mbh->entry_addr;
        } else {

            /* Now look for an ELF32 header */
            ehdr = (Elf32_Ehdr *)load_addr;
            if (*(unsigned long *)ehdr != 0x464c457f   /* "\177ELF", LSB first */
                || ehdr->e_ident[EI_DATA] != ELFDATA2LSB
                || ehdr->e_ident[EI_CLASS] != ELFCLASS32
                || ehdr->e_machine != EM_386)
            {
                printf("Fatal: kernel has neither ELF32/x86 nor multiboot load"
                       " headers.\n");
                exit(1);
            }
            if (ehdr->e_phoff + ehdr->e_phnum*sizeof (*phdr) > load_size) {
                printf("Fatal: malformed ELF header overruns EOF.\n");
                exit(1);
            }
            if (ehdr->e_phnum <= 0) {
                printf("Fatal: ELF kernel has no program headers.\n");
                exit(1);
            }

#ifdef DEBUG
            printf("Using ELF header.\n");
#endif

            if (ehdr->e_type != ET_EXEC
                || ehdr->e_version != EV_CURRENT
                || ehdr->e_phentsize != sizeof (Elf32_Phdr)) {
                printf("Warning: funny-looking ELF header.\n");
            }
            phdr = (Elf32_Phdr *)(load_addr + ehdr->e_phoff);

            /* Obey the program headers to load the kernel */
            for (i = 0; i < ehdr->e_phnum; i++) {

                /* How much is in this segment? */
                run_size = phdr[i].p_memsz;
                if (phdr[i].p_type != PT_LOAD)
                    seg_size = 0;
                else
                    seg_size = (size_t)phdr[i].p_filesz;

                /* Where is it in the loaded file? */
                seg_addr = load_addr + phdr[i].p_offset;
                if (seg_addr + seg_size > load_addr + load_size) {
                    printf("Fatal: ELF load segment runs off the "
                           "end of the file.\n");
                    exit(1);
                }

                /* Skip segments that don't take up any memory */
                if (run_size == 0) continue;

                /* Place the segment where it wants to be */
                run_addr = phdr[i].p_paddr;
                place_kernel_section(run_addr, run_size);

                /* Put it on the relocation list */
                if (seg_size < run_size) {
                    /* Set up the kernel BSS too */
                    if (seg_size > 0)
                        add_section(run_addr, seg_addr, seg_size);
                    bss_size = run_size - seg_size;
                    add_section(run_addr + seg_size, NULL, bss_size);
                } else {
                    /* No BSS */
                    add_section(run_addr, seg_addr, run_size);
                }
            }

            if (ehdr->e_shoff != 0) {
#ifdef DEBUG
                printf("Loading ELF section table.\n");
#endif
                /* Section header table */
                shdr = (Elf32_Shdr *)(load_addr + ehdr->e_shoff);

                /* Section header table size */
                run_size = ehdr->e_shentsize * ehdr->e_shnum;
                shdr_run_addr = place_module_section(run_size, 0x1000);
                if (shdr_run_addr == 0) {
                    printf("Warning: Not enough memory to load the "
                           "section table.\n");
                    return ehdr->e_entry;
                }
                add_section(shdr_run_addr, (void *) shdr, run_size);

                /* Load section data that wasn't already loaded through the
                 * program segments */
                for (i = 0; i < ehdr->e_shnum; i++) {
                    /* Skip sections that are already covered by a program
                     * header, and sections with no data to load */
                    if (shdr[i].sh_addr != 0 || !shdr[i].sh_size)
                        continue;

                    if (shdr[i].sh_addralign == 0)
                        shdr[i].sh_addralign = 1;

                    run_addr = place_module_section(shdr[i].sh_size,
                                                    shdr[i].sh_addralign);
                    if (run_addr == 0) {
                        printf("Warning: Not enough memory to load "
                               "section %d.\n", i);
                        return ehdr->e_entry;
                    }
                    shdr[i].sh_addr = run_addr;
                    add_section(run_addr,
                                (void *) (shdr[i].sh_offset + load_addr),
                                shdr[i].sh_size);
                }

                mbi->flags |= MB_INFO_ELF_SHDR;
                mbi->syms.e.num = ehdr->e_shnum;
                mbi->syms.e.size = ehdr->e_shentsize;
                mbi->syms.e.shndx = ehdr->e_shstrndx;
                mbi->syms.e.addr = shdr_run_addr;
#ifdef DEBUG
                printf("Section information: shnum: %lu, entSize: %lu, "
                       "shstrndx: %lu, addr: 0x%lx\n",
                       mbi->syms.e.num, mbi->syms.e.size,
                       mbi->syms.e.shndx, mbi->syms.e.addr);
#endif
            }

            /* Done! */
            return ehdr->e_entry;
        }
    }

    /* This is not a multiboot kernel */
    printf("Fatal: not a multiboot kernel.\n");
    exit(1);
}
static void load_module(struct mod_list *mod, char *cmdline)
/* Load a multiboot module and allocate a memory area for it */
{
    char *load_addr, *p;
    size_t load_size, run_addr;

    printf("Module: %s\n", cmdline);

    load_addr = 0;
    load_size = 0;
    p = strchr(cmdline, ' ');
    if (p != NULL) *p = 0;
    load_file(cmdline, &load_addr, &load_size);
    if (p != NULL) *p = ' ';

    /* Decide where it's going to live */
    run_addr = place_module_section(load_size, X86_PAGE_SIZE);
    if (run_addr == 0) {
        printf("Fatal: can't find space for this module.\n");
        exit(1);
    }
    add_section(run_addr, load_addr, load_size);

    /* Remember where we put it */
    mod->mod_start = run_addr;
    mod->mod_end = run_addr + load_size;
    mod->pad = 0;

#ifdef DEBUG
    printf("Placed module (%#8.8x+%#x)\n", run_addr, load_size);
#endif
}
/*
 * Code for shuffling sections into place and booting the new kernel
 */
static void trampoline_start(section_t *secs, int sec_count,
                             size_t mbi_run_addr, size_t entry)
/* Final shuffle-and-boot code.  Running on the stack; no external code
 * or data can be relied on. */
{
    int i;
    struct lidt_operand idt;

    /* SYSLINUX has set up SS, DS and ES as 32-bit 0--4G data segments,
     * but doesn't specify FS and GS.  Multiboot wants them all to be
     * the same, so we'd better do that before we overwrite the GDT. */
    asm volatile("movl %ds, %ecx; movl %ecx, %fs; movl %ecx, %gs");

    /* Turn off interrupts */
    asm volatile("cli");

    /* SYSLINUX has set up an IDT at 0x100000 that does all the
     * comboot calls, and we're about to overwrite it.  The Multiboot
     * spec says that the kernel must set up its own IDT before turning
     * on interrupts, but it's still entitled to use BIOS calls, so we'll
     * put the IDT back to the BIOS one at the base of memory. */
    idt.base = 0;
    idt.limit = 0x800;
    asm volatile("lidt %0" : : "m" (idt));
    /* Now, shuffle the sections */
    for (i = 0; i < sec_count; i++) {
        if (secs[i].src == NULL) {
            /* asm bzero() code from com32/lib/memset.c */
            char *q = (char *) secs[i].dest;
            size_t nl = secs[i].size >> 2;
            asm volatile("cld ; rep ; stosl ; movl %3,%0 ; rep ; stosb"
                         : "+c" (nl), "+D" (q)
                         : "a" (0x0U), "r" (secs[i].size & 3));
        } else {
            /* asm memmove() code from com32/lib/memmove.c */
            const char *p = secs[i].src;
            char *q = (char *) secs[i].dest;
            size_t n = secs[i].size;
            if ( q < p ) {
                asm volatile("cld ; rep ; movsb"
                             : "+c" (n), "+S" (p), "+D" (q));
            } else {
                p += (n-1);
                q += (n-1);
                asm volatile("std ; rep ; movsb"
                             : "+c" (n), "+S" (p), "+D" (q));
            }
        }
    }

    /* Now set up the last tiny bit of Multiboot environment.
     * A20 is already enabled.
     * CR0 already has PG cleared and PE set.
     * EFLAGS already has VM and IF cleared.
     * ESP is the kernel's problem.
     * GDTR is the kernel's problem.
     * CS is already a 32-bit, 0--4G code segment.
     * DS, ES, FS and GS are already 32-bit, 0--4G data segments.
     *
     * EAX must be 0x2badb002 and EBX must point to the MBI when we jump. */

    asm volatile("jmp %*%2"
                 : : "a" (0x2badb002), "b" (mbi_run_addr), "cdSDm" (entry));
}

static void trampoline_end(void) {}
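/* trampoline_end() exists only so that boot() below can compute the size of
 * trampoline_start(); the copy assumes the compiler lays the two functions
 * out contiguously and in this order. */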
static void boot(size_t mbi_run_addr, size_t entry)
/* Tidy up SYSLINUX, shuffle memory and boot the kernel */
{
    com32sys_t regs;
    section_t *tr_sections;
    void (*trampoline)(section_t *, int, size_t, size_t);
    size_t trampoline_size;

    /* Make sure the relocations are safe. */
    reorder_sections();

    /* Copy the shuffle-and-boot code and the array of relocations
     * onto the memory we previously used for malloc() heap.  This is
     * safe because it's not the source or the destination of any
     * copies, and there'll be no more library calls after the copy. */

    tr_sections = ((section_t *) section_addr) + section_count;
    trampoline = (void *) (tr_sections + section_count);
    trampoline_size = (void *)&trampoline_end - (void *)&trampoline_start;

#ifdef DEBUG
    printf("tr_sections:     %p\n"
           "trampoline:      %p\n"
           "trampoline_size: %#8.8x\n"
           "max_run_addr:    %#8.8x\n",
           tr_sections, trampoline, trampoline_size, max_run_addr);
#endif

    printf("Booting: MBI=%#8.8x, entry=%#8.8x\n", mbi_run_addr, entry);

    memmove(tr_sections, section_addr, section_count * sizeof (section_t));
    memmove(trampoline, trampoline_start, trampoline_size);

    /* Tell SYSLINUX to clean up */
    memset(&regs, 0, sizeof regs);
    regs.eax.l = 0x000c;        /* "Perform final cleanup" */
    regs.edx.l = 0;             /* "Normal cleanup" */
    __intcall(0x22, &regs, NULL);

    /* Into the unknown */
    trampoline(tr_sections, section_count, mbi_run_addr, entry);
}
int main(int argc, char **argv)
/* Parse the command-line and invoke loaders */
{
    struct multiboot_info *mbi;
    struct mod_list *modp;
    int modules, num_append_args;
    int mbi_reloc_offset;
    char *p;
    size_t mbi_run_addr, mbi_size, entry;
    int i;

    /* Say hello */
    console_ansi_std();
    printf("%s.  %s\n", version_string, copyright_string);

    if (argc < 2 || !strcmp(argv[1], module_separator)) {
        printf("Fatal: No kernel filename!\n");
        exit(1);
    }

#ifdef DEBUG
    printf("_end:           %p\n"
           "argv[1]:        %p\n"
           "next_load_addr: %p\n"
           "section_addr:   %p\n"
           "__mem_end:      %p\n"
           "argv[0]:        %p\n",
           &_end, argv[1], next_load_addr, section_addr, __mem_end, argv[0]);
#endif

    /* How much space will the MBI need? */
    modules = 0;
    mbi_size = sizeof(struct multiboot_info) + strlen(version_string) + 5;
    for (i = 1 ; i < argc ; i++) {
        if (!strcmp(argv[i], module_separator)) {
            modules++;
            mbi_size += sizeof(struct mod_list) + 1;
        } else {
            mbi_size += strlen(argv[i]) + 1;
        }
    }

    /* Allocate space in the load buffer for the MBI, all the command
     * lines, and all the module details. */
    mbi = (struct multiboot_info *)next_load_addr;
    next_load_addr += mbi_size;
    if (next_load_addr > section_addr) {
        printf("Fatal: out of memory allocating for boot metadata.\n");
        exit(1);
    }
    memset(mbi, 0, sizeof (struct multiboot_info));
    p = (char *)(mbi + 1);
    mbi->flags = MB_INFO_CMDLINE | MB_INFO_BOOT_LOADER_NAME;

    /* Figure out the memory map.
     * N.B. Must happen before place_low_section() is called. */
    init_mmap(mbi);

    mbi_run_addr = place_low_section(mbi_size, 4);
    if (mbi_run_addr == 0) {
        printf("Fatal: can't find space for the MBI!\n");
        exit(1);
    }
    mbi_reloc_offset = (size_t)mbi - mbi_run_addr;
    add_section(mbi_run_addr, (void *)mbi, mbi_size);
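    /* From here on, every pointer stored inside the MBI must be a run-time
     * address, so pointers into the load buffer are converted by subtracting
     * mbi_reloc_offset (computed above). */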
    /* Module info structs */
    modp = (struct mod_list *) (((size_t)p + 3) & ~3);
    if (modules > 0) mbi->flags |= MB_INFO_MODS;
    mbi->mods_count = modules;
    mbi->mods_addr = ((size_t)modp) - mbi_reloc_offset;
    p = (char *)(modp + modules);

    /* "Append" arguments (foo=bar) show up at the beginning of our command
     * line; we append them to the kernel command line further down. */
    for (i = 1; i < argc; i++) {
        if (strchr(argv[i], '=') != NULL) {
            continue;
        }
        break;
    }

    /* Command lines: first kernel, then modules */
    mbi->cmdline = ((size_t)p) - mbi_reloc_offset;
    modules = 0;
    num_append_args = i - 1;

    for (; i < argc ; i++) {
        if (!strcmp(argv[i], module_separator)) {
            /* Add append args to kernel cmdline */
            if (modules == 0 && num_append_args) {
                int j;
                for (j = 1; j < num_append_args + 1; j++) {
                    strcpy(p, argv[j]);
                    p += strlen(argv[j]);
                    *p++ = ' ';
                }
            }
            *p++ = '\0';
            modp[modules++].cmdline = ((size_t)p) - mbi_reloc_offset;
        } else {
            strcpy(p, argv[i]);
            p += strlen(argv[i]);
            *p++ = ' ';
        }
    }
    *p++ = '\0';

    /* Bootloader ID */
    strcpy(p, version_string);
    mbi->boot_loader_name = ((size_t)p) - mbi_reloc_offset;
    p += strlen(version_string) + 1;

    /* Now, do all the loading, and boot it */
    entry = load_kernel(mbi, (char *)(mbi->cmdline + mbi_reloc_offset));
    for (i = 0; i < modules; i++) {
        load_module(&(modp[i]), (char *)(modp[i].cmdline + mbi_reloc_offset));
    }
    boot(mbi_run_addr, entry);

    return 1;
}

/*
 * EOF
 */