/* SPDX-License-Identifier: GPL-2.0 */

#include <asm/ptrace.h>
#include <asm/pgtable.h>

#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf64-ia64-little")
        unwind 0x70000001;      /* PT_IA_64_UNWIND, but ld doesn't match the name */
        /*
         * unwind exit sections must be discarded before
         * the rest of the sections get included.
         */
        /DISCARD/ : {
                *(.IA_64.unwind.exit.text)
                *(.IA_64.unwind_info.exit.text)
        }
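
        /*
         * They have to go first: the .IA_64.unwind* wildcards further
         * down would otherwise sweep the exit-text unwind entries into
         * the kernel's unwind tables.
         */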

        v = PAGE_OFFSET;        /* this symbol is here to make debugging easier... */
        phys_start = _start - LOAD_OFFSET;
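
        /*
         * All sections below are linked at their virtual address but
         * loaded at virtual minus LOAD_OFFSET, which is why every
         * output section carries an AT(ADDR(section) - LOAD_OFFSET)
         * load-address clause.
         */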

        .text : AT(ADDR(.text) - LOAD_OFFSET) {
        .text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
                *(.text2)
        }

        .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
                *(.text..lock)
        }

        NOTES :code :note       /* put .notes in text and mark in PT_NOTE */
        code_continues : {
        } : code        /* switch back to regular program... */
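
        /*
         * code_continues is an empty section: its only job is the
         * ":code" tag, which moves the link back into the code program
         * header after NOTES switched output to the note header.
         */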
        __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
                __start___mca_table = .;
                *(__mca_table)
                __stop___mca_table = .;
        }
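
        /*
         * The table above lists code ranges for which the machine-check
         * (MCA) handler has recovery information.
         */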

        .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
                __start___phys_stack_reg_patchlist = .;
                *(.data..patch.phys_stack_reg)
                __end___phys_stack_reg_patchlist = .;
        }
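
        /*
         * Each .data..patch.* section is a boot-time patch list: an
         * array of addresses of instruction bundles that
         * arch/ia64/kernel/patch.c rewrites early in boot, once the
         * real values (here the physical stack register number) are
         * known.
         */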

        /* Unwind info & table: */
        .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
                *(.IA_64.unwind_info*)
        }
        .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
                __start_unwind = .;
                *(.IA_64.unwind*)
                __end_unwind = .;
        } :code :unwind
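
        /*
         * ia64 has no frame-pointer chain; the kernel unwinder walks
         * these static unwind tables instead, and the section is tagged
         * with the PT_IA_64_UNWIND program header declared above so
         * external tools can locate it.
         */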

        .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
                __start_opd = .;
                *(.opd)
                __end_opd = .;
        }
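
        /*
         * On ia64, a function pointer points at a function descriptor
         * (entry address plus gp value) rather than at code; .opd is
         * where those descriptors are collected.
         */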

        /*
         * Initialization code and data:
         */
        . = ALIGN(PAGE_SIZE);

        INIT_TEXT_SECTION(PAGE_SIZE)
        INIT_DATA_SECTION(16)
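
        /*
         * Everything placed by INIT_TEXT_SECTION/INIT_DATA_SECTION
         * (both from asm-generic/vmlinux.lds.h) is freed back to the
         * page allocator once boot completes.
         */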

        .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
                __start___vtop_patchlist = .;
                *(.data..patch.vtop)
                __end___vtop_patchlist = .;
        }

        .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
                __start___rse_patchlist = .;
                *(.data..patch.rse)
                __end___rse_patchlist = .;
        }
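
        /*
         * Bundles patched for the McKinley "erratum 9" workaround; on
         * CPUs that don't need the workaround, patch.c replaces them
         * with NOPs at boot.
         */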
        .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
                __start___mckinley_e9_bundles = .;
                *(.data..patch.mckinley_e9)
                __end___mckinley_e9_bundles = .;
        }
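
        /*
         * CONFIG_IA64_GENERIC kernels link in a machine vector for
         * every supported platform; the right one is picked by name
         * early at boot.
         */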
#if defined(CONFIG_IA64_GENERIC)
        .machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
                machvec_start = .;
                *(.machvec)
                machvec_end = .;
        }
#endif

        . = ALIGN(PERCPU_PAGE_SIZE);
        __cpu0_per_cpu = .;
        . = . + PERCPU_PAGE_SIZE;       /* cpu0 per-cpu space */
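
        /*
         * One percpu page is reserved here so CPU 0 has working per-cpu
         * data before the real per-cpu areas are set up later in boot.
         */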

        . = ALIGN(PAGE_SIZE);
        .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
                PAGE_ALIGNED_DATA(PAGE_SIZE)
                . = ALIGN(PAGE_SIZE);
                __start_gate_section = .;
                *(.data..gate)
                __stop_gate_section = .;
        }
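
        /*
         * The gate section is the page mapped into every user process
         * (signal trampoline and fast syscall entry), so it has to stay
         * page-aligned and page-sized.
         */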
        /*
         * make sure the gate page doesn't expose
         * kernel data
         */
        . = ALIGN(PAGE_SIZE);

        . = ALIGN(PERCPU_PAGE_SIZE);
        PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
        __phys_per_cpu_start = __per_cpu_load;
        /*
         * ensure percpu data fits
         * into percpu page size
         */
        . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
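
        /*
         * If percpu data ever outgrew PERCPU_PAGE_SIZE, the assignment
         * above would move the location counter backwards and the link
         * would fail, so an overflow is caught at build time.
         */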

        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                _sdata = .;
                INIT_TASK_DATA(PAGE_SIZE)
                CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
                READ_MOSTLY_DATA(SMP_CACHE_BYTES)
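                /*
                 * Layout note: the init task's stack needs page
                 * alignment, so it comes first, followed by
                 * cacheline-aligned and then read-mostly data; grouping
                 * by alignment keeps padding down and keeps read-mostly
                 * variables off frequently written cachelines.
                 */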

        . = ALIGN(16);  /* gp must be 16-byte aligned for exc. table */
        .got : AT(ADDR(.got) - LOAD_OFFSET) {
                *(.got.plt)
                *(.got)
        }
        __gp = ADDR(.got) + 0x200000;
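
        /*
         * 0x200000 puts gp in the middle of a 4MB window: ia64's addl
         * takes a 22-bit signed immediate, so one instruction can reach
         * 2MB on either side of gp, covering the whole .got and
         * small-data area.
         */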

        /*
         * We want the small data sections together,
         * so single-instruction offsets can access
         * them all, and initialized data all before
         * uninitialized, so we can shorten the
         * on-disk segment size.
         */
        .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
                *(.sdata)
                *(.sdata1)
                *(.srdata)
        }

        /* Default discards */
        DISCARDS