/*
 * Common prep/pmac/chrp boot and setup code.
 */
5 #include <linux/module.h>
6 #include <linux/string.h>
7 #include <linux/sched.h>
8 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/reboot.h>
11 #include <linux/delay.h>
12 #include <linux/initrd.h>
13 #include <linux/tty.h>
14 #include <linux/seq_file.h>
15 #include <linux/root_dev.h>
16 #include <linux/cpu.h>
17 #include <linux/console.h>
18 #include <linux/memblock.h>
22 #include <asm/processor.h>
23 #include <asm/pgtable.h>
24 #include <asm/setup.h>
27 #include <asm/cputable.h>
28 #include <asm/bootx.h>
29 #include <asm/btext.h>
30 #include <asm/machdep.h>
31 #include <asm/uaccess.h>
32 #include <asm/pmac_feature.h>
33 #include <asm/sections.h>
34 #include <asm/nvram.h>
37 #include <asm/serial.h>
39 #include <asm/mmu_context.h>
40 #include <asm/epapr_hcalls.h>
/* Entry point of the BootX (miBoot) early-boot glue; defined elsewhere. */
extern void bootx_init(unsigned long r4, unsigned long phys);

/* NOTE(review): the `int boot_cpuid_phys;` definition is expected to
 * immediately precede this export — confirm it was not lost in a merge. */
EXPORT_SYMBOL_GPL(boot_cpuid_phys);

/* Logical-to-hardware CPU number map used by 32-bit SMP platforms. */
int smp_hw_index[NR_CPUS];

/* ISA DMA bounce threshold and 8237-style DMA mode command values;
 * presumably filled in by platform setup code — confirm per platform. */
unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
/*
 * We're called here very early in the boot. We determine the machine
 * type and call the appropriate low-level setup functions.
 *  -- Cort <cort@fsmlabs.com>
 *
 * Note that the kernel may be running at an address which is different
 * from the address that it was linked at, so we must use RELOC/PTRRELOC
 * to access static data (including strings). -- paulus
 */
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
	/* Difference between the link address and where we actually run. */
	unsigned long offset = reloc_offset();
	struct cpu_spec *spec;

	/* First zero the BSS -- use memset_io, some platforms don't have
	 * caches on yet */
	memset_io((void __iomem *)PTRRELOC(&__bss_start), 0,
			__bss_stop - __bss_start);

	/*
	 * Identify the CPU type and fix up code sections
	 * that depend on which cpu we have.
	 */
	spec = identify_cpu(offset, mfspr(SPRN_PVR));

	/* Patch the alternative-code sections recorded at link time so the
	 * kernel text matches the CPU/MMU features just identified.  All
	 * section addresses go through PTRRELOC because we may not be
	 * running at the link address yet. */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));
	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));
	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

	/* Tell the caller where the kernel actually lives. */
	return KERNELBASE + offset;
}
107 * Find out what kind of machine we're on and save any data we need
108 * from the early boot process (devtree is copied on pmac by prom_init()).
109 * This is called very early on the boot process, after a minimal
110 * MMU environment has been set up but before MMU_init is called.
112 notrace
void __init
machine_init(u64 dt_ptr
)
116 /* Enable early debugging if any specified (see udbg.h) */
119 /* Do some early initialization based on the flat device tree */
120 early_init_devtree(__va(dt_ptr
));
122 epapr_paravirt_early_init();
128 setup_kdump_trampoline();
131 if (cpu_has_feature(CPU_FTR_CAN_DOZE
) ||
132 cpu_has_feature(CPU_FTR_CAN_NAP
))
133 ppc_md
.power_save
= ppc6xx_idle
;
137 if (cpu_has_feature(CPU_FTR_CAN_DOZE
) ||
138 cpu_has_feature(CPU_FTR_CAN_NAP
))
139 ppc_md
.power_save
= e500_idle
;
142 ppc_md
.progress("id mach(): done", 0x200);
145 /* Checks "l2cr=xxxx" command-line option */
146 int __init
ppc_setup_l2cr(char *str
)
148 if (cpu_has_feature(CPU_FTR_L2CR
)) {
149 unsigned long val
= simple_strtoul(str
, NULL
, 0);
150 printk(KERN_INFO
"l2cr set to %lx\n", val
);
151 _set_L2CR(0); /* force invalidate by disable cache */
152 _set_L2CR(val
); /* and enable it */
156 __setup("l2cr=", ppc_setup_l2cr
);
158 /* Checks "l3cr=xxxx" command-line option */
159 int __init
ppc_setup_l3cr(char *str
)
161 if (cpu_has_feature(CPU_FTR_L3CR
)) {
162 unsigned long val
= simple_strtoul(str
, NULL
, 0);
163 printk(KERN_INFO
"l3cr set to %lx\n", val
);
164 _set_L3CR(val
); /* and enable it */
168 __setup("l3cr=", ppc_setup_l3cr
);
170 #ifdef CONFIG_GENERIC_NVRAM
172 /* Generic nvram hooks used by drivers/char/gen_nvram.c */
173 unsigned char nvram_read_byte(int addr
)
175 if (ppc_md
.nvram_read_val
)
176 return ppc_md
.nvram_read_val(addr
);
179 EXPORT_SYMBOL(nvram_read_byte
);
181 void nvram_write_byte(unsigned char val
, int addr
)
183 if (ppc_md
.nvram_write_val
)
184 ppc_md
.nvram_write_val(addr
, val
);
186 EXPORT_SYMBOL(nvram_write_byte
);
188 ssize_t
nvram_get_size(void)
190 if (ppc_md
.nvram_size
)
191 return ppc_md
.nvram_size();
194 EXPORT_SYMBOL(nvram_get_size
);
196 void nvram_sync(void)
198 if (ppc_md
.nvram_sync
)
201 EXPORT_SYMBOL(nvram_sync
);
203 #endif /* CONFIG_NVRAM */
205 int __init
ppc_init(void)
207 /* clear the progress line */
209 ppc_md
.progress(" ", 0xffff);
211 /* call platform init */
212 if (ppc_md
.init
!= NULL
) {
218 arch_initcall(ppc_init
);
220 static void __init
irqstack_early_init(void)
224 /* interrupt stacks must be in lowmem, we get that for free on ppc32
225 * as the memblock is limited to lowmem by default */
226 for_each_possible_cpu(i
) {
227 softirq_ctx
[i
] = (struct thread_info
*)
228 __va(memblock_alloc(THREAD_SIZE
, THREAD_SIZE
));
229 hardirq_ctx
[i
] = (struct thread_info
*)
230 __va(memblock_alloc(THREAD_SIZE
, THREAD_SIZE
));
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/* Allocate the extra per-CPU exception-level stacks (critical, and on
 * BookE also debug and machine-check) used by low-level interrupt entry.
 * Indexed by *hardware* CPU number, not the logical cpu id. */
static void __init exc_lvl_early_init(void)
{
	unsigned int i, hw_cpu;

	/* interrupt stacks must be in lowmem, we get that for free on ppc32
	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
	for_each_possible_cpu(i) {
#ifdef CONFIG_SMP
		hw_cpu = get_hard_smp_processor_id(i);
#else
		hw_cpu = 0;
#endif

		critirq_ctx[hw_cpu] = (struct thread_info *)
			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
		/* dbgirq/mcheckirq stacks exist only on BookE. */
		dbgirq_ctx[hw_cpu] = (struct thread_info *)
			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
		mcheckirq_ctx[hw_cpu] = (struct thread_info *)
			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#endif
	}
}
#else
/* No extra exception levels on other 32-bit platforms: compile to nothing.
 * This fallback must live in the #else branch, not after the function. */
#define exc_lvl_early_init()
#endif
262 /* Warning, IO base is not yet inited */
263 void __init
setup_arch(char **cmdline_p
)
265 *cmdline_p
= boot_command_line
;
267 /* so udelay does something sensible, assume <= 1000 bogomips */
268 loops_per_jiffy
= 500000000 / HZ
;
270 unflatten_device_tree();
273 if (ppc_md
.init_early
)
276 find_legacy_serial_ports();
278 smp_setup_cpu_maps();
280 /* Register early console */
281 register_early_udbg_console();
286 * Set cache line size based on type of cpu as a default.
287 * Systems with OF can look in the properties on the cpu node(s)
288 * for a possibly more accurate value.
290 dcache_bsize
= cur_cpu_spec
->dcache_bsize
;
291 icache_bsize
= cur_cpu_spec
->icache_bsize
;
293 if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE
))
294 ucache_bsize
= icache_bsize
= dcache_bsize
;
299 init_mm
.start_code
= (unsigned long)_stext
;
300 init_mm
.end_code
= (unsigned long) _etext
;
301 init_mm
.end_data
= (unsigned long) _edata
;
302 init_mm
.brk
= klimit
;
304 exc_lvl_early_init();
306 irqstack_early_init();
309 if ( ppc_md
.progress
) ppc_md
.progress("setup_arch: initmem", 0x3eab);
311 #ifdef CONFIG_DUMMY_CONSOLE
312 conswitchp
= &dummy_con
;
315 if (ppc_md
.setup_arch
)
317 if ( ppc_md
.progress
) ppc_md
.progress("arch: exit", 0x3eab);
321 /* Initialize the MMU context management stuff */