/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If a symbol's value
 * should change when the kernel is relocated, make the symbol section
 * relative and put it inside the section definition.
 */
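/*
 * For illustration only (not part of this script):
 *
 *	foo = 0x1000;			<- absolute: keeps its value
 *	.data : { bar = .; *(.data) }	<- bar is section relative and
 *					   is relocated along with .data
 */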
#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>
#undef i386	/* in case the preprocessor is a 32-bit one */
OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
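/*
 * OUTPUT_FORMAT(default, big, little) names the BFD target ld uses
 * normally and under -EB/-EL respectively; passing CONFIG_OUTPUT_FORMAT
 * (e.g. "elf32-i386" or "elf64-x86-64") for all three pins the output
 * format regardless of endian options.
 */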
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif
PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(7);		/* RWE */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(7);		/* RWE */
	data.init PT_LOAD FLAGS(7);	/* RWE */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(7);	/* RWE */
#endif
	data.init2 PT_LOAD FLAGS(7);	/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}
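/*
 * FLAGS() takes the raw ELF p_flags mask: PF_X = 1, PF_W = 2, PF_R = 4.
 * Hence FLAGS(5) = PF_R|PF_X (the "R_E" above), FLAGS(7) = PF_R|PF_W|PF_X
 * ("RWE"), and FLAGS(0) grants no access at all.
 */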
SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
	. = __START_KERNEL;
	phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif
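	/*
	 * The ELF entry point must be the physical address the boot
	 * loader jumps to, while startup_32/startup_64 are link-time
	 * virtual addresses, so LOAD_OFFSET is subtracted out.
	 * Illustrative numbers only: with LOAD_OFFSET = 0xC0000000 and
	 * startup_32 linked at 0xC1000000, phys_startup_32 resolves to
	 * 0x01000000.
	 */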
	/* Text and read-only data */

	/* bootstrapping code */
	.text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
		_text = .;
		*(.text.head)
	} :text = 0x9090

	/* The rest of the text */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_X86_32
		/* not really needed, already page aligned */
		. = ALIGN(PAGE_SIZE);
		*(.text.page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)

		/* End of text section */
		_etext = .;
	} :text = 0x9090
	/* Exception table */
	. = ALIGN(16);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	} :text = 0x9090
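	/*
	 * Each __ex_table entry pairs a potentially faulting instruction
	 * address with a fixup address; on a fault the trap handler
	 * searches [__start___ex_table, __stop___ex_table) and, on a
	 * hit, resumes at the fixup instead of oopsing.
	 */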
	/* Data */
	. = ALIGN(PAGE_SIZE);
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;
		DATA_DATA
		CONSTRUCTORS
	} :data
#ifdef CONFIG_X86_32
	/* 32 bit has nosave before _edata */
	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	}
#endif
	. = ALIGN(PAGE_SIZE);
	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
		*(.data.page_aligned)
		*(.data.idt)
	}
	. = ALIGN(PAGE_SIZE);
	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.data.cacheline_aligned :
		AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
		*(.data.cacheline_aligned)
	}
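	/*
	 * L1-line alignment keeps objects in .data.cacheline_aligned
	 * from sharing a cache line with unrelated data, avoiding false
	 * sharing on SMP.
	 */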
	/* rarely changed data like cpu maps */
	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
		*(.data.read_mostly)

		/* End of data section */
		_edata = .;
	} :data
#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
			    SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
			    SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
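/*
 * (x + 4095) & ~(4095) rounds x up to the next 4 KiB boundary (e.g.
 * 0x1234 -> 0x2000), placing the vsyscall page right after
 * .data.read_mostly. VLOAD(x) then yields a section's load (physical)
 * address and VVIRT(x) its address in the ordinary kernel mapping,
 * even though the sections themselves are linked at the fixed
 * VSYSCALL_ADDR.
 */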
	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
		*(.vsyscall_0)
	} :user

	__vsyscall_0 = VSYSCALL_VIRT_ADDR;
	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}
	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}
	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);
	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}
	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);
	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);
	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}
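	/*
	 * The vsyscall entries are user-space ABI: entry N sits exactly
	 * at VSYSCALL_ADDR + N * 1024, which is why .vsyscall_1/2/3 are
	 * pinned at 1024-byte offsets from .vsyscall_0 instead of being
	 * packed back to back.
	 */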
	. = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT
#endif /* CONFIG_X86_64 */
	/* init_task */
	. = ALIGN(THREAD_SIZE);
	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
		*(.data.init_task)
	}
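	/*
	 * THREAD_SIZE alignment lets current_thread_info() find the
	 * init task's thread_info by masking the low bits of the stack
	 * pointer, exactly as for dynamically allocated task stacks.
	 */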
	/*
	 * smp_locks might be freed after init;
	 * start/end must be page aligned.
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
	}
	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
	}
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}
	. = ALIGN(16);
	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		__setup_start = .;
		*(.init.setup)
		__setup_end = .;
	}
	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		__initcall_start = .;
		INITCALLS
		__initcall_end = .;
	}
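	/*
	 * INITCALLS collects .initcall0.init .. .initcall7.init in
	 * order; do_initcalls() then walks the resulting
	 * [__initcall_start, __initcall_end) array at boot, which is
	 * what orders core_initcall() through late_initcall() callbacks.
	 */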
	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		__con_initcall_start = .;
		*(.con_initcall.init)
		__con_initcall_end = .;
	}
	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
	/*
	 * .exit.text is discarded at run time, not link time, to deal
	 * with references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}
#ifdef CONFIG_BLK_DEV_INITRD
	. = ALIGN(PAGE_SIZE);
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		__initramfs_start = .;
		*(.init.ramfs)
		__initramfs_end = .;
	}
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - __data_nosave - should
	 * start another section, data.init2. Also, pda should be at the head
	 * of the percpu area. Preallocate it and define the percpu offset
	 * symbol so that it can be accessed as a percpu variable.
	 */
	. = ALIGN(PAGE_SIZE);
	PERCPU_VADDR(0, :percpu)
#else
	PERCPU(PAGE_SIZE)
#endif
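	/*
	 * With zero-based per-cpu, a variable's link address equals its
	 * offset from the per-cpu base kept in %gs, so an access
	 * compiles to a single %gs-relative instruction, e.g. (name
	 * illustrative):
	 *	movq %gs:per_cpu__foo, %rax
	 */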
	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}
#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		. = ALIGN(PAGE_SIZE);
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	} :data.init2
	/* use another section data.init2, see PERCPU_VADDR() above */
#endif
	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss.page_aligned)
		*(.bss)
		. = ALIGN(4);
		__bss_stop = .;
	}
	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
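	/*
	 * .brk is the early-boot allocator's arena: extend_brk() hands
	 * out memory from here before the page allocator exists, the
	 * 64k slop absorbs alignment, and RESERVE_BRK() users emit the
	 * .brk_reservation entries gathered above.
	 */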
	.end : AT(ADDR(.end) - LOAD_OFFSET) {
		_end = .;
	}
	/* Sections to be discarded */
	/DISCARD/ : {
		*(.exitcall.exit)
		*(.eh_frame)
	}

	STABS_DEBUG
	DWARF_DEBUG
}
#ifdef CONFIG_X86_32
ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	"kernel image bigger than KERNEL_IMAGE_SIZE")
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
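/*
 * For example, INIT_PER_CPU(gdt_page) expands to:
 *
 *	init_per_cpu__gdt_page = per_cpu__gdt_page + __per_cpu_load;
 *
 * i.e. an alias into the per-cpu load image that early boot code can
 * use before the real per-cpu areas have been set up.
 */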
/*
 * Build-time check on the image size:
 */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	"kernel image bigger than KERNEL_IMAGE_SIZE")
#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
	"irq_stack_union is not at start of per-cpu area");
#endif
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	"kexec control code size is too big")
#endif