/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that symbol
 * value should remain constant even if kernel image is relocated
 * at run time. Absolute symbols are not relocated. If symbol value should
 * change if kernel is relocated, make the symbol section relative and
 * put it inside the section definition.
 */
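
/*
 * For illustration (not part of the original script): C code normally
 * consumes the section-relative symbols this script emits through array
 * declarations, so the addresses track the image wherever it is loaded:
 *
 *	extern char _text[], _end[];		// bounds defined below
 *	size_t image_bytes = _end - _text;	// hypothetical use
 */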

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#define RO_EXCEPTION_TABLE_ALIGN	16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section and for the padding pages
 * (which are freed) at its end, so the identity mappings will be broken
 * into smaller pages. On 64-bit, kernel text and kernel identity mappings
 * are separate, so we can enable protection checks as well as retain 2MB
 * large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);			\
		__end_rodata_hpage_align = .;		\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED						\
	. = ALIGN(PMD_SIZE);					\
	__start_bss_decrypted = .;				\
	*(.bss..decrypted);					\
	. = ALIGN(PAGE_SIZE);					\
	__start_bss_decrypted_unused = .;			\
	. = ALIGN(PMD_SIZE);					\
	__end_bss_decrypted = .;				\
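
/*
 * Sketch of the C-side usage (assuming the __bss_decrypted attribute from
 * asm/mem_encrypt.h, which expands to a .bss..decrypted section attribute;
 * the variable name is hypothetical):
 *
 *	static u64 shared_boot_page[512] __bss_decrypted;
 *
 * Such data stays mapped unencrypted even when SME/SEV encrypts the rest
 * of the kernel image.
 */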

#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END				\
		. = ALIGN(PAGE_SIZE);			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ALIGN_ENTRY_TEXT_BEGIN
		ENTRY_TEXT
		IRQENTRY_TEXT
		ALIGN_ENTRY_TEXT_END
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)

#ifdef CONFIG_RETPOLINE
		__indirect_thunk_start = .;
		*(.text.__x86.indirect_thunk)
		__indirect_thunk_end = .;
#endif

		/* End of text section, which should occupy a whole number of pages */
		_etext = .;
		. = ALIGN(PAGE_SIZE);
	} :text =0xcccc
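
	/*
	 * Illustrative sketch (not from the original file): the thunk bounds
	 * above let the kernel test whether an address lies inside a
	 * retpoline thunk, along these lines:
	 *
	 *	extern u8 __indirect_thunk_start[], __indirect_thunk_end[];
	 *
	 *	static bool is_indirect_thunk(unsigned long addr)
	 *	{
	 *		return addr >= (unsigned long)__indirect_thunk_start &&
	 *		       addr < (unsigned long)__indirect_thunk_end;
	 *	}
	 */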

	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#include <asm/vvar.h>
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
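
/*
 * Sketch of the producer side (assuming the DECLARE_VVAR()/EMIT_VVAR()
 * protocol in asm/vvar.h; the offset and type shown are illustrative and
 * vary between kernel versions):
 *
 *	DECLARE_VVAR(128, struct vdso_data, _vdso_data)
 *
 * When this script includes asm/vvar.h, each DECLARE_VVAR() is routed to
 * the EMIT_VVAR() definition above, pinning that vvar at a fixed offset
 * within the page.
 */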

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}
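
	/*
	 * Usage sketch (not from the original file): callers write
	 *
	 *	if (static_cpu_has(X86_FEATURE_XMM2))
	 *		fast_path();
	 *
	 * and once alternatives have run, the test is patched into a direct
	 * branch; the pre-patch fallback code for the test itself lives in
	 * .altinstr_aux and is never executed afterwards.
	 */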

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which can be patched at
	 * runtime with virtualization-friendly instructions or baremetal
	 * native ones. Think page table operations.
	 * Details in paravirt_types.h
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}
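
	/*
	 * Hedged sketch of what an entry looks like (see struct
	 * paravirt_patch_site in asm/paravirt_types.h; fields approximate):
	 *
	 *	struct paravirt_patch_site {
	 *		u8 *instr;	// original instructions
	 *		u8 type;	// type of this instruction
	 *		u8 len;		// length of original instruction
	 *	};
	 *
	 * apply_paravirt() walks [__parainstructions, __parainstructions_end)
	 * and patches each site in place.
	 */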

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
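
	/*
	 * Sketch of the C-side usage (ALTERNATIVE() from asm/alternative.h;
	 * the called functions are hypothetical):
	 *
	 *	asm volatile (ALTERNATIVE("call legacy_flush",
	 *				  "call fast_flush",
	 *				  X86_FEATURE_CLFLUSHOPT));
	 *
	 * This records a struct alt_instr entry here and drops the
	 * replacement bytes into .altinstr_replacement below.
	 */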

	/*
	 * And here are the replacement instructions. The linker sticks them
	 * in as binary blobs. The .altinstructions section has enough data
	 * to get their address and length, so the kernel can be patched
	 * safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMUs which is sorted at runtime in dependency
	 * order. After rootfs_initcall has completed, this section can be
	 * safely removed.
	 */
	. = ALIGN(8);
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}
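
	/*
	 * Hedged sketch of an entry (see struct iommu_table_entry in
	 * asm/iommu_table.h; fields approximate):
	 *
	 *	struct iommu_table_entry {
	 *		initcall_t	detect;		// non-zero if detected
	 *		initcall_t	depend;		// entry this one follows
	 *		void		(*early_init)(void);
	 *		void		(*late_init)(void);
	 *		int		flags;
	 *	};
	 *
	 * Entries are emitted with the IOMMU_INIT*() helper macros.
	 */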

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}
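
	/*
	 * APIC drivers register themselves here via apic_driver() from
	 * asm/apic.h, e.g.
	 *
	 *	apic_driver(apic_flat);
	 *
	 * which emits a pointer into .apicdrivers; the probe code then walks
	 * the array in link order to pick a driver.
	 */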

	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
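
	/*
	 * Entries are generated by the LOCK_PREFIX machinery in
	 * asm/alternative.h; roughly (hedged sketch):
	 *
	 *	#define LOCK_PREFIX_HERE			\
	 *		".pushsection .smp_locks,\"a\"\n"	\
	 *		".balign 4\n"				\
	 *		".long 671f - .\n"			\
	 *		".popsection\n"				\
	 *		"671:"
	 *
	 * so every lock prefix records its own address, letting a UP kernel
	 * patch the prefixes out at boot.
	 */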

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;
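
	/*
	 * The matching consumer in setup_arch() (arch/x86/kernel/setup.c)
	 * looks like this:
	 *
	 *	memblock_reserve(__pa_symbol(_text),
	 *			 (unsigned long)__end_of_kernel_reserve -
	 *			 (unsigned long)_text);
	 */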

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;
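
	/*
	 * Brk space is claimed at build time with RESERVE_BRK() and handed
	 * out during boot by extend_brk() (see asm/setup.h), e.g.
	 *
	 *	RESERVE_BRK(dmi_alloc, 65536);
	 *
	 *	struct dmi_entry *p;			// hypothetical type
	 *	p = extend_brk(sizeof(*p), sizeof(long));
	 */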

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section:  Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since it is located after __end_of_kernel_reserve
	 * it will be discarded and become part of the available memory. As
	 * such, it can only be used by very early boot code and must not be
	 * needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

}

#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);
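
/*
 * Illustrative sketch (hypothetical consumer): early 64-bit boot code can
 * reference the boot CPU's copy through these symbols before the real
 * per-cpu areas are set up, e.g. via init_per_cpu_var() from asm/percpu.h:
 *
 *	early_gdt_descr.address = (unsigned long)init_per_cpu_var(gdt_page);
 */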

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
	   "fixed_percpu_data is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif