#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/pgtable.h>

#include <asm-generic/vmlinux.lds.h>

	VMLINUX_SYMBOL(__start_ivt_text) = .;	\
	VMLINUX_SYMBOL(__end_ivt_text) = .;
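/* __start_ivt_text and __end_ivt_text above bracket the interrupt vector
 * table text collected by the IVT_TEXT macro, so code such as the unwinder
 * can tell whether an address falls inside the IVT. */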
OUTPUT_FORMAT("elf64-ia64-little")

  unwind 0x70000001;	/* PT_IA_64_UNWIND, but ld doesn't match the name */

  /* unwind exit sections must be discarded before the rest of the
     sections get included. */
	*(.IA_64.unwind.exit.text)
	*(.IA_64.unwind_info.exit.text)

  v = PAGE_OFFSET;	/* this symbol is here to make debugging easier... */
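  /* Load-time (physical) address corresponding to _start: */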
  phys_start = _start - LOAD_OFFSET;
  .text : AT(ADDR(.text) - LOAD_OFFSET)
  .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
  NOTES :code :note	/* put .notes in text and mark in PT_NOTE */
  code_continues : {} :code	/* switch back to regular program... */
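  /* Table used by the MCA (machine check abort) recovery code; the entries
   * between __start___mca_table and __stop___mca_table describe code ranges
   * from which a machine check is considered recoverable. */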
  __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
	__start___mca_table = .;
	__stop___mca_table = .;
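  /* Boot-time code patching: each .data.patch.* section below collects the
   * addresses of instructions to be rewritten at startup by
   * arch/ia64/kernel/patch.c.  This first list marks the instructions that
   * get the CPU's actual number of physical stacked registers patched in
   * (ia64_patch_phys_stack_reg()). */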
  .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
	__start___phys_stack_reg_patchlist = .;
	*(.data.patch.phys_stack_reg)
	__end___phys_stack_reg_patchlist = .;

  /* Unwind info & table: */
  .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
	{ *(.IA_64.unwind_info*) }
  .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
  code_continues2 : {} : code

  .opd : AT(ADDR(.opd) - LOAD_OFFSET)

  /* Initialization code and data: */
  . = ALIGN(PAGE_SIZE);
  INIT_TEXT_SECTION(PAGE_SIZE)
  INIT_DATA_SECTION(16)
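  /* Addresses of instructions whose 64-bit virtual-address immediates are
   * rewritten to the corresponding physical addresses at boot
   * (ia64_patch_vtop()), for code that has to run in physical mode. */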
  .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
	__start___vtop_patchlist = .;
	__end___vtop_patchlist = .;
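  /* Patch list for RSE (register stack engine) related instructions,
   * handled at boot alongside the other .data.patch.* lists. */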
  .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
	__start___rse_patchlist = .;
	__end___rse_patchlist = .;
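  /* Bundles implementing the McKinley erratum 9 workaround; on CPUs that do
   * not need the workaround they are replaced with NOPs at boot
   * (ia64_patch_mckinley_e9()). */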
  .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
	__start___mckinley_e9_bundles = .;
	*(.data.patch.mckinley_e9)
	__end___mckinley_e9_bundles = .;
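  /* Paravirtualization: lists of instruction bundles, instructions and
   * branches that are binary-patched by the ia64 paravirt patching code at
   * boot to match the environment the kernel ends up running on. */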
#if defined(CONFIG_PARAVIRT)
  .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET)
	__start_paravirt_bundles = .;
	__stop_paravirt_bundles = .;

  .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET)
	__start_paravirt_insts = .;
	__stop_paravirt_insts = .;

  .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET)
	__start_paravirt_branches = .;
	*(.paravirt_branches)
	__stop_paravirt_branches = .;
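  /* On generic (CONFIG_IA64_GENERIC) kernels, the machine vector descriptors
   * for all supported platforms are collected in .machvec so the right one
   * can be selected by name at boot. */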
#if defined(CONFIG_IA64_GENERIC)
  .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)

  . = ALIGN(PAGE_SIZE);
  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
	PAGE_ALIGNED_DATA(PAGE_SIZE)
	. = ALIGN(PAGE_SIZE);
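	/* The gate page: kernel-provided text (signal trampolines, fast
	 * system call stubs) mapped into every user process.  The
	 * __start/__stop_gate_section symbols bracket it so it can be located
	 * and mapped at boot; the __xen_* variant is the paravirtualized gate
	 * page used when running on Xen. */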
	__start_gate_section = .;
	__stop_gate_section = .;
	. = ALIGN(PAGE_SIZE);
	__xen_start_gate_section = .;
	__xen_stop_gate_section = .;
  . = ALIGN(PAGE_SIZE);		/* make sure the gate page doesn't expose kernel data */
  . = ALIGN(PERCPU_PAGE_SIZE);
  PERCPU_VADDR(PERCPU_ADDR, :percpu)
  __phys_per_cpu_start = __per_cpu_load;
  . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
						 * into percpu page size */
  .data : AT(ADDR(.data) - LOAD_OFFSET)
	. = ALIGN(PERCPU_PAGE_SIZE);
	. = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
	INIT_TASK_DATA(PAGE_SIZE)
	CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
	READ_MOSTLY_DATA(SMP_CACHE_BYTES)

  . = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
  .got : AT(ADDR(.got) - LOAD_OFFSET)
	{ *(.got.plt) *(.got) }
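  /* gp-relative addressing on ia64 uses a signed 22-bit offset (+/-2MB), so
   * placing gp 2MB past the start of .got lets that window cover the GOT and
   * the small data sections that follow it. */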
  __gp = ADDR(.got) + 0x200000;
  /* We want the small data sections together, so single-instruction offsets
     can access them all, and initialized data all before uninitialized, so
     we can shorten the on-disk segment size. */
  .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
	{ *(.sdata) *(.sdata1) *(.srdata) }

  /* Default discards */