2 * Copyright 2004-2009 Analog Devices Inc.
4 * Licensed under the GPL-2 or later
7 #include <asm-generic/vmlinux.lds.h>
8 #include <asm/mem_map.h>
10 #include <asm/thread_info.h>
12 OUTPUT_FORMAT("elf32-bfin")
14 _jiffies = _jiffies_64;
18 #ifdef CONFIG_RAMKERNEL
24 /* The text, ro_data and bss sections do not need to be aligned,
25 * so pack them back to back.
33 #ifndef CONFIG_SCHEDULE_L1
39 #ifdef CONFIG_ROMKERNEL
53 ___start___ex_table = .;
55 ___stop___ex_table = .;
62 /* Just in case the first read-only access is a 32-bit access */
66 #ifdef CONFIG_ROMKERNEL
68 .bss : AT(__rodata_end)
87 #if defined(CONFIG_ROMKERNEL)
88 .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
94 /* Placed first so the later wildcard glob doesn't absorb it */
95 CACHELINE_ALIGNED_DATA(32)
99 *(.data_l1.cacheline_aligned)
102 #if !L1_DATA_B_LENGTH
107 *(.data_l2.cacheline_aligned)
114 INIT_TASK_DATA(THREAD_SIZE)
118 __data_lma = LOADADDR(.data);
119 __data_len = SIZEOF(.data);
121 /* The init section should be last, so when we free it, it goes into
122 * the general memory pool, and (hopefully) will decrease fragmentation
123 * a tiny bit. The init section has a _requirement_ that it be
126 . = ALIGN(PAGE_SIZE);
129 #ifdef CONFIG_RAMKERNEL
130 INIT_TEXT_SECTION(PAGE_SIZE)
132 /* We have to discard exit text and such at runtime, not link time, to
133 * handle embedded cross-section references (alt instructions, bug
134 * table, eh_frame, etc...). We need all of our .text up front and
135 * .data after it for PCREL call issues.
143 INIT_DATA_SECTION(16)
151 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
153 .init.data : AT(__data_lma + __data_len)
165 ___per_cpu_start = .;
166 *(.data.percpu.first)
167 *(.data.percpu.page_aligned)
169 *(.data.percpu.shared_aligned)
175 __init_data_lma = LOADADDR(.init.data);
176 __init_data_len = SIZEOF(.init.data);
179 .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
185 #ifdef CONFIG_SCHEDULE_L1
191 __text_l1_lma = LOADADDR(.text_l1);
192 __text_l1_len = SIZEOF(.text_l1);
193 ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")
195 .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
203 *(.data_l1.cacheline_aligned)
211 __data_l1_lma = LOADADDR(.data_l1);
212 __data_l1_len = SIZEOF(.data_l1);
213 ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")
215 .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
228 __data_b_l1_lma = LOADADDR(.data_b_l1);
229 __data_b_l1_len = SIZEOF(.data_b_l1);
230 ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")
232 .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
246 *(.data_l2.cacheline_aligned)
254 __l2_lma = LOADADDR(.text_data_l2);
255 __l2_len = SIZEOF(.text_data_l2);
256 ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
258 /* Force trailing alignment of our init section so that when we
259 * free our init memory, we don't leave behind a partial page.
261 #ifdef CONFIG_RAMKERNEL
262 . = __l2_lma + __l2_len;
266 . = ALIGN(PAGE_SIZE);