/* ld script to make i386 Linux kernel
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
 *
 * Don't define absolute symbols until and unless you know that symbol
 * value should remain constant even if kernel image is relocated
 * at run time. Absolute symbols are not relocated. If symbol value should
 * change if kernel is relocated, make the symbol section relative and
 * put it inside the section definition.
 */

#define LOAD_OFFSET __PAGE_OFFSET

#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/cache.h>

OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
ENTRY(phys_startup_32)

	text PT_LOAD FLAGS(5);	/* R_E */
	data PT_LOAD FLAGS(7);	/* RWE */
	note PT_NOTE FLAGS(0);	/* ___ */

  . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
  phys_startup_32 = startup_32 - LOAD_OFFSET;

  .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
	_text = .;			/* Text and read-only data */

  .text : AT(ADDR(.text) - LOAD_OFFSET) {
	. = ALIGN(PAGE_SIZE); /* not really needed, already page aligned */

	_etext = .;			/* End of text section */

  . = ALIGN(16);		/* Exception table */
  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
	__start___ex_table = .;
	__stop___ex_table = .;

  .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
	__tracedata_start = .;

  .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */

  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {

  . = ALIGN(PAGE_SIZE);
  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
	*(.data.page_aligned)

  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
	*(.data.cacheline_aligned)

  /* rarely changed data like cpu maps */
  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
  _edata = .;			/* End of data section */

  . = ALIGN(THREAD_SIZE);	/* init_task */
  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {

  /* might get freed after init */
  . = ALIGN(PAGE_SIZE);
  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {

  /* will be freed after init
   * Following ALIGN() is required to make sure no other data falls on the
   * same page where __smp_alt_end is pointing as that page might be freed
   * after boot. Always make sure that ALIGN() directive is present after
   * the section which contains __smp_alt_end.
   */
  . = ALIGN(PAGE_SIZE);

  /* will be freed after init */
  . = ALIGN(PAGE_SIZE);		/* Init code and data */
  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {

  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
	__initcall_start = .;
  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
	__con_initcall_start = .;
	*(.con_initcall.init)
	__con_initcall_end = .;

  .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
	__alt_instructions = .;
	__alt_instructions_end = .;
  .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
	*(.altinstr_replacement)
  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
	__parainstructions = .;
	__parainstructions_end = .;

  /* .exit.text is discarded at runtime, not link time, to deal with
   * references from .altinstructions and .eh_frame */
  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {

#if defined(CONFIG_BLK_DEV_INITRD)
  . = ALIGN(PAGE_SIZE);
  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
	__initramfs_start = .;

  . = ALIGN(PAGE_SIZE);
  .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
	*(.data.percpu.shared_aligned)

  . = ALIGN(PAGE_SIZE);
  /* freed after init ends here */

  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
	__bss_start = .;		/* BSS */

  /* This is where the kernel creates the early boot page tables */
  . = ALIGN(PAGE_SIZE);

  /* Sections to be discarded */