 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * INIT_TEXT_SECTION(PAGE_SIZE)
 * INIT_DATA_SECTION(...)
 * PERCPU_SECTION(CACHELINE_SIZE)
 * RO_DATA_SECTION(PAGE_SIZE)
 * RW_DATA_SECTION(...)
 * EXCEPTION_TABLE(...)
 * BSS_SECTION(0, 0, 0)
 * DISCARDS		// must be the last
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
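 *
 * A rough illustrative sketch of how an architecture's vmlinux.lds.S might
 * combine these macros (an assumption-laden example, not taken from any
 * particular architecture: OUTPUT_ARCH(i386), phys_startup_32, PAGE_OFFSET
 * and the alignment values are placeholders):
 *
 *	OUTPUT_ARCH(i386)
 *	ENTRY(phys_startup_32)
 *	SECTIONS
 *	{
 *		. = PAGE_OFFSET;
 *		_text = .;
 *		HEAD_TEXT_SECTION
 *		.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *			TEXT_TEXT
 *			SCHED_TEXT
 *			LOCK_TEXT
 *		}
 *		_etext = .;
 *		RO_DATA_SECTION(PAGE_SIZE)
 *		RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *		EXCEPTION_TABLE(16)
 *		. = ALIGN(PAGE_SIZE);
 *		__init_begin = .;
 *		INIT_TEXT_SECTION(PAGE_SIZE)
 *		INIT_DATA_SECTION(16)
 *		PERCPU_SECTION(L1_CACHE_BYTES)
 *		. = ALIGN(PAGE_SIZE);
 *		__init_end = .;
 *		BSS_SECTION(0, 0, 0)
 *		_end = .;
 *		STABS_DEBUG
 *		DWARF_DEBUG
 *		DISCARDS
 *	}
 */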
#include <linux/export.h>
/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
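/*
 * Illustration of the intended effect (a sketch, using .meminit.text as the
 * example input section): with CONFIG_MEMORY_HOTPLUG=y, MEM_KEEP(init.text)
 * expands to *(.meminit.text) and MEM_DISCARD(init.text) to nothing, so
 * __meminit code is kept in the permanent text section; with memory hotplug
 * disabled the two definitions swap, and .meminit.text is emitted into the
 * init area that is freed after boot.
 */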
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_mcount_loc) = .; \
        VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
        *(_ftrace_annotated_branch) \
        VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#define LIKELY_PROFILE()
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
        VMLINUX_SYMBOL(__stop_branch_profile) = .;
#define BRANCH_PROFILE()
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
        *(_kprobe_blacklist) \
        VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#define KPROBE_BLACKLIST()
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_ftrace_events) = .; \
        VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
        VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
        *(_ftrace_enum_map) \
        VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#define FTRACE_EVENTS()
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
        *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
        VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
        *(__tracepoint_str) /* Trace_printk fmt' pointer */ \
        VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
        *(__syscalls_metadata) \
        VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#define TRACE_SYSCALLS()
#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN(); \
        VMLINUX_SYMBOL(__earlycon_table) = .; \
        *(__earlycon_table) \
        *(__earlycon_table_end)
#define EARLYCON_TABLE()
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)  ___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)    __OF_TABLE(config_enabled(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
        VMLINUX_SYMBOL(__##name##_of_table) = .; \
        *(__##name##_of_table) \
        *(__##name##_of_table_end)
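/*
 * Sketch of the expansion above: the __OF_TABLE/___OF_TABLE indirection lets
 * config_enabled(cfg) expand to 0 or 1 before the ## paste, so e.g.
 * OF_TABLE(CONFIG_IRQCHIP, irqchip) with CONFIG_IRQCHIP=y selects
 * _OF_TABLE_1(irqchip) and emits roughly
 *
 *	VMLINUX_SYMBOL(__irqchip_of_table) = .;
 *	*(__irqchip_of_table)
 *	*(__irqchip_of_table_end)
 *
 * while a disabled option selects _OF_TABLE_0(irqchip), which emits nothing.
 */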
#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE()	OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES()	OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
#define EARLYCON_OF_TABLES()	OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
#define KERNEL_DTB() \
        VMLINUX_SYMBOL(__dtb_start) = .; \
        *(.dtb.init.rodata) \
        VMLINUX_SYMBOL(__dtb_end) = .;
        *(.data..shared_aligned) /* percpu related */ \
        MEM_KEEP(init.data) \
        MEM_KEEP(exit.data) \
        /* implement dynamic printk debug */ \
        VMLINUX_SYMBOL(__start___jump_table) = .; \
        VMLINUX_SYMBOL(__stop___jump_table) = .; \
        VMLINUX_SYMBOL(__start___verbose) = .; \
        VMLINUX_SYMBOL(__stop___verbose) = .; \
 * Data section helpers
#define NOSAVE_DATA \
        . = ALIGN(PAGE_SIZE); \
        VMLINUX_SYMBOL(__nosave_begin) = .; \
        . = ALIGN(PAGE_SIZE); \
        VMLINUX_SYMBOL(__nosave_end) = .;
#define PAGE_ALIGNED_DATA(page_align) \
        . = ALIGN(page_align); \
        *(.data..page_aligned)
#define READ_MOSTLY_DATA(align) \
        *(.data..read_mostly) \
#define CACHELINE_ALIGNED_DATA(align) \
        *(.data..cacheline_aligned)
#define INIT_TASK_DATA(align) \
#define RO_DATA_SECTION(align) \
        . = ALIGN((align)); \
        .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_rodata) = .; \
                *(.rodata) *(.rodata.*) \
                *(__vermagic) /* Kernel version magic */ \
                VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
                *(__tracepoints_ptrs) /* Tracepoints: pointer array */ \
                VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
                *(__tracepoints_strings) /* Tracepoints: strings */ \
        .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
        .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
                *(.pci_fixup_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
                *(.pci_fixup_header) \
                VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
                *(.pci_fixup_final) \
                VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
                *(.pci_fixup_enable) \
                VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
                *(.pci_fixup_resume) \
                VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
                *(.pci_fixup_resume_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
                *(.pci_fixup_suspend) \
                VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
                *(.pci_fixup_suspend_late) \
                VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
        /* Built-in firmware blobs */ \
        .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_builtin_fw) = .; \
                VMLINUX_SYMBOL(__end_builtin_fw) = .; \
        /* Kernel symbol table: Normal symbols */ \
        __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab) = .; \
                *(SORT(___ksymtab+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab) = .; \
        /* Kernel symbol table: GPL-only symbols */ \
        __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
                *(SORT(___ksymtab_gpl+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
        /* Kernel symbol table: Normal unused symbols */ \
        __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
                *(SORT(___ksymtab_unused+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
        /* Kernel symbol table: GPL-only unused symbols */ \
        __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
                *(SORT(___ksymtab_unused_gpl+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
                *(SORT(___ksymtab_gpl_future+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
        /* Kernel symbol table: Normal symbols */ \
        __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab) = .; \
                *(SORT(___kcrctab+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab) = .; \
        /* Kernel symbol table: GPL-only symbols */ \
        __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
                *(SORT(___kcrctab_gpl+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
        /* Kernel symbol table: Normal unused symbols */ \
        __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
                *(SORT(___kcrctab_unused+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
        /* Kernel symbol table: GPL-only unused symbols */ \
        __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
                *(SORT(___kcrctab_unused_gpl+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
                *(SORT(___kcrctab_gpl_future+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
        /* Kernel symbol table: strings */ \
        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
                *(__ksymtab_strings) \
        /* __*init sections */ \
        __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
                MEM_KEEP(init.rodata) \
                MEM_KEEP(exit.rodata) \
        /* Built-in module parameters. */ \
        __param : AT(ADDR(__param) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___param) = .; \
                VMLINUX_SYMBOL(__stop___param) = .; \
        /* Built-in module versions. */ \
        __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___modver) = .; \
                VMLINUX_SYMBOL(__stop___modver) = .; \
        . = ALIGN((align)); \
        VMLINUX_SYMBOL(__end_rodata) = .; \
399 /* RODATA & RO_DATA provided for backward compatibility.
400 * All archs are supposed to use RO_DATA() */
401 #define RODATA RO_DATA_SECTION(4096)
402 #define RO_DATA(align) RO_DATA_SECTION(align)
404 #define SECURITY_INIT \
405 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
406 VMLINUX_SYMBOL(__security_initcall_start) = .; \
407 *(.security_initcall.init) \
408 VMLINUX_SYMBOL(__security_initcall_end) = .; \
/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
        *(.text .text.fixup) \
        MEM_KEEP(init.text) \
        MEM_KEEP(exit.text) \
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
        VMLINUX_SYMBOL(__sched_text_start) = .; \
        VMLINUX_SYMBOL(__sched_text_end) = .;
/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
        VMLINUX_SYMBOL(__lock_text_start) = .; \
        VMLINUX_SYMBOL(__lock_text_end) = .;
#define KPROBES_TEXT \
        VMLINUX_SYMBOL(__kprobes_text_start) = .; \
        VMLINUX_SYMBOL(__kprobes_text_end) = .;
        VMLINUX_SYMBOL(__entry_text_start) = .; \
        VMLINUX_SYMBOL(__entry_text_end) = .;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
        VMLINUX_SYMBOL(__irqentry_text_start) = .; \
        VMLINUX_SYMBOL(__irqentry_text_end) = .;
#define IRQENTRY_TEXT
/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)
#define HEAD_TEXT_SECTION \
        .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
#define EXCEPTION_TABLE(align) \
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ex_table) = .; \
                VMLINUX_SYMBOL(__stop___ex_table) = .; \
#define INIT_TASK_DATA_SECTION(align) \
        .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
                INIT_TASK_DATA(align) \
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
        VMLINUX_SYMBOL(__ctors_start) = .; \
        *(SORT(.init_array.*)) \
        VMLINUX_SYMBOL(__ctors_end) = .;
#define KERNEL_CTORS()
/* init and exit section handling */
        MEM_DISCARD(init.data) \
        MEM_DISCARD(init.rodata) \
        RESERVEDMEM_OF_TABLES() \
        CPU_METHOD_OF_TABLES() \
        CPUIDLE_METHOD_OF_TABLES() \
        IRQCHIP_OF_MATCH_TABLE() \
        MEM_DISCARD(init.text)
        MEM_DISCARD(exit.data) \
        MEM_DISCARD(exit.rodata)
        MEM_DISCARD(exit.text)
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
#define SBSS(sbss_align) \
        . = ALIGN(sbss_align); \
        .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
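/*
 * Illustration only (the input section name below is a made-up placeholder):
 * an architecture that needs certain input sections at the very start of
 * .bss can define, in its own linker script before including this header,
 *
 *	#define BSS_FIRST_SECTIONS *(.bss..arch_first)
 */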
#define BSS(bss_align) \
        . = ALIGN(bss_align); \
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
                *(.bss..page_aligned) \
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
#define DWARF_DEBUG \
        .debug 0 : { *(.debug) } \
        .line 0 : { *(.line) } \
        /* GNU DWARF 1 extensions */ \
        .debug_srcinfo 0 : { *(.debug_srcinfo) } \
        .debug_sfnames 0 : { *(.debug_sfnames) } \
        /* DWARF 1.1 and DWARF 2 */ \
        .debug_aranges 0 : { *(.debug_aranges) } \
        .debug_pubnames 0 : { *(.debug_pubnames) } \
        .debug_info 0 : { *(.debug_info \
                .gnu.linkonce.wi.*) } \
        .debug_abbrev 0 : { *(.debug_abbrev) } \
        .debug_line 0 : { *(.debug_line) } \
        .debug_frame 0 : { *(.debug_frame) } \
        .debug_str 0 : { *(.debug_str) } \
        .debug_loc 0 : { *(.debug_loc) } \
        .debug_macinfo 0 : { *(.debug_macinfo) } \
        /* SGI/MIPS DWARF 2 extensions */ \
        .debug_weaknames 0 : { *(.debug_weaknames) } \
        .debug_funcnames 0 : { *(.debug_funcnames) } \
        .debug_typenames 0 : { *(.debug_typenames) } \
        .debug_varnames 0 : { *(.debug_varnames) } \
/* Stabs debugging sections. */
#define STABS_DEBUG \
        .stab 0 : { *(.stab) } \
        .stabstr 0 : { *(.stabstr) } \
        .stab.excl 0 : { *(.stab.excl) } \
        .stab.exclstr 0 : { *(.stab.exclstr) } \
        .stab.index 0 : { *(.stab.index) } \
        .stab.indexstr 0 : { *(.stab.indexstr) } \
        .comment 0 : { *(.comment) }
#ifdef CONFIG_GENERIC_BUG
        __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___bug_table) = .; \
                VMLINUX_SYMBOL(__stop___bug_table) = .; \
#ifdef CONFIG_PM_TRACE
        .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__tracedata_start) = .; \
                VMLINUX_SYMBOL(__tracedata_end) = .; \
        .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_notes) = .; \
                VMLINUX_SYMBOL(__stop_notes) = .; \
#define INIT_SETUP(initsetup_align) \
        . = ALIGN(initsetup_align); \
        VMLINUX_SYMBOL(__setup_start) = .; \
        VMLINUX_SYMBOL(__setup_end) = .;
#define INIT_CALLS_LEVEL(level) \
        VMLINUX_SYMBOL(__initcall##level##_start) = .; \
        *(.initcall##level##.init) \
        *(.initcall##level##s.init) \
        VMLINUX_SYMBOL(__initcall_start) = .; \
        *(.initcallearly.init) \
        INIT_CALLS_LEVEL(0) \
        INIT_CALLS_LEVEL(1) \
        INIT_CALLS_LEVEL(2) \
        INIT_CALLS_LEVEL(3) \
        INIT_CALLS_LEVEL(4) \
        INIT_CALLS_LEVEL(5) \
        INIT_CALLS_LEVEL(rootfs) \
        INIT_CALLS_LEVEL(6) \
        INIT_CALLS_LEVEL(7) \
        VMLINUX_SYMBOL(__initcall_end) = .;
#define CON_INITCALL \
        VMLINUX_SYMBOL(__con_initcall_start) = .; \
        *(.con_initcall.init) \
        VMLINUX_SYMBOL(__con_initcall_end) = .;
#define SECURITY_INITCALL \
        VMLINUX_SYMBOL(__security_initcall_start) = .; \
        *(.security_initcall.init) \
        VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
        VMLINUX_SYMBOL(__initramfs_start) = .; \
 * Default discarded sections.
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
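 *
 * Illustration (a sketch, not taken from this header): an architecture whose
 * alternative-instruction patching references exit code at runtime can emit
 * it as a regular output section before DISCARDS, e.g.
 *
 *	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 *		EXIT_TEXT
 *	}
 *
 * so those input sections are already placed by the time DISCARDS is reached
 * and are therefore not thrown away.
 */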
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
#define PERCPU_INPUT(cacheline) \
        VMLINUX_SYMBOL(__per_cpu_start) = .; \
        *(.data..percpu..first) \
        . = ALIGN(PAGE_SIZE); \
        *(.data..percpu..page_aligned) \
        . = ALIGN(cacheline); \
        *(.data..percpu..read_mostly) \
        . = ALIGN(cacheline); \
        *(.data..percpu..shared_aligned) \
        VMLINUX_SYMBOL(__per_cpu_end) = .;
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 * Macro which expands to output section for percpu area.
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
        VMLINUX_SYMBOL(__per_cpu_load) = .; \
        .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
                PERCPU_INPUT(cacheline) \
        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
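/*
 * Illustrative invocation (a sketch; the cacheline constant and PHDR name are
 * architecture choices, not mandated here): an arch that wants zero-based
 * percpu symbols emitted into a dedicated "percpu" program header might write
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 */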
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 * Aligns to PAGE_SIZE and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu which is required for relocatable x86_32 configuration.
#define PERCPU_SECTION(cacheline) \
        . = ALIGN(PAGE_SIZE); \
        .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__per_cpu_load) = .; \
                PERCPU_INPUT(cacheline) \
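/*
 * Typical invocation from an architecture linker script (the argument shown
 * is only an example; each arch passes its own cacheline constant):
 *
 *	PERCPU_SECTION(L1_CACHE_BYTES)
 */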
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than a PAGE_SIZE so
 * the sections that have this restriction (or similar)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 * use 0 as page_align if page_aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
        . = ALIGN(PAGE_SIZE); \
        .data : AT(ADDR(.data) - LOAD_OFFSET) { \
                INIT_TASK_DATA(inittask) \
                PAGE_ALIGNED_DATA(pagealigned) \
                CACHELINE_ALIGNED_DATA(cacheline) \
                READ_MOSTLY_DATA(cacheline) \
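/*
 * Illustrative invocation (placeholder values; an arch passes its own
 * cacheline size, page alignment and init task data alignment):
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 */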
#define INIT_TEXT_SECTION(inittext_align) \
        . = ALIGN(inittext_align); \
        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(_sinittext) = .; \
                VMLINUX_SYMBOL(_einittext) = .; \
#define INIT_DATA_SECTION(initsetup_align) \
        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
                INIT_SETUP(initsetup_align) \
#define BSS_SECTION(sbss_align, bss_align, stop_align) \
        . = ALIGN(sbss_align); \
        VMLINUX_SYMBOL(__bss_start) = .; \
        . = ALIGN(stop_align); \
        VMLINUX_SYMBOL(__bss_stop) = .;