arch/powerpc/kernel/setup-common.c
/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/lmb.h>
#include <asm/xmon.h>

#include "setup.h"

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* The main machine-dep calls structure
 */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

unsigned long klimit = (unsigned long) _end;
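
/*
 * Untouched kernel command line, saved here by early boot code; exactly
 * where it comes from (device tree /chosen node, bootloader, built-in
 * CONFIG_CMDLINE) is platform-dependent.
 */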
char cmd_line[COMMAND_LINE_SIZE];

/*
 * This still seems to be needed... -- paulus
 */
struct screen_info screen_info = {
	.orig_x = 0,
	.orig_y = 25,
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};

#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

/* also used by kexec */
void machine_shutdown(void)
{
	if (ppc_md.machine_shutdown)
		ppc_md.machine_shutdown();
}

void machine_restart(char *cmd)
{
	machine_shutdown();
	if (ppc_md.restart)
		ppc_md.restart(cmd);
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}

void machine_power_off(void)
{
	machine_shutdown();
	if (ppc_md.power_off)
		ppc_md.power_off();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}

/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);

void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void machine_halt(void)
{
	machine_shutdown();
	if (ppc_md.halt)
		ppc_md.halt();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}

#ifdef CONFIG_TAU
extern u32 cpu_temp(unsigned long cpu);
extern u32 cpu_temp_both(unsigned long cpu);
#endif /* CONFIG_TAU */

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, pvr);
#endif
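
/*
 * The 'show' step for /proc/cpuinfo: called once per CPU index, plus one
 * extra pass (cpu_id == NR_CPUS) that prints the machine-wide summary.
 */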
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	unsigned int pvr;
	unsigned short maj;
	unsigned short min;

	if (cpu_id == NR_CPUS) {
#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
		unsigned long bogosum = 0;
		int i;
		for_each_online_cpu(i)
			bogosum += loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP && CONFIG_PPC32 */
		seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
		if (ppc_md.name)
			seq_printf(m, "platform\t: %s\n", ppc_md.name);
		if (ppc_md.show_cpuinfo != NULL)
			ppc_md.show_cpuinfo(m);

		return 0;
	}

	/* We only show online cpus: disable preempt (overzealous, I
	 * know) to prevent the cpu from going down. */
	preempt_disable();
	if (!cpu_online(cpu_id)) {
		preempt_enable();
		return 0;
	}

#ifdef CONFIG_SMP
	pvr = per_cpu(pvr, cpu_id);
#else
	pvr = mfspr(SPRN_PVR);
#endif
	maj = (pvr >> 8) & 0xFF;
	min = pvr & 0xFF;

	seq_printf(m, "processor\t: %lu\n", cpu_id);
	seq_printf(m, "cpu\t\t: ");

	if (cur_cpu_spec->pvr_mask)
		seq_printf(m, "%s", cur_cpu_spec->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		seq_printf(m, ", altivec supported");
#endif /* CONFIG_ALTIVEC */

	seq_printf(m, "\n");

#ifdef CONFIG_TAU
	if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
#ifdef CONFIG_TAU_AVERAGE
		/* more straightforward, but potentially misleading */
		seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
			   cpu_temp(cpu_id));
#else
		/* show the actual temp sensor range */
		u32 temp;
		temp = cpu_temp_both(cpu_id);
		seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
			   temp & 0xff, temp >> 16);
#endif
	}
#endif /* CONFIG_TAU */

	/*
	 * Assume here that all clock rates are the same in an
	 * SMP system.  -- Cort
	 */
	if (ppc_proc_freq)
		seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
			   ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);

	if (ppc_md.show_percpuinfo != NULL)
		ppc_md.show_percpuinfo(m, cpu_id);

	/* If we are a Freescale core do a simple check so
	 * we don't have to keep adding cases in the future */
	if (PVR_VER(pvr) & 0x8000) {
		maj = PVR_MAJ(pvr);
		min = PVR_MIN(pvr);
	} else {
		switch (PVR_VER(pvr)) {
		case 0x0020:	/* 403 family */
			maj = PVR_MAJ(pvr) + 1;
			min = PVR_MIN(pvr);
			break;
		case 0x1008:	/* 740P/750P ?? */
			maj = ((pvr >> 8) & 0xFF) - 1;
			min = pvr & 0xFF;
			break;
		default:
			maj = (pvr >> 8) & 0xFF;
			min = pvr & 0xFF;
			break;
		}
	}

	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
		   maj, min, PVR_VER(pvr), PVR_REV(pvr));

#ifdef CONFIG_PPC32
	seq_printf(m, "bogomips\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

#ifdef CONFIG_SMP
	seq_printf(m, "\n");
#endif

	preempt_enable();
	return 0;
}
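
/*
 * seq_file iteration for /proc/cpuinfo: positions 0..NR_CPUS are turned
 * into (cpu_id + 1) cookies so that NULL can mean "end of file"; the extra
 * final slot is what triggers the summary pass in show_cpuinfo().
 */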
static void *c_start(struct seq_file *m, loff_t *pos)
{
	unsigned long i = *pos;

	return i <= NR_CPUS ? (void *)(i + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next  = c_next,
	.stop  = c_stop,
	.show  = show_cpuinfo,
};

void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	DBG(" -> check_for_initrd()  initrd_start=0x%lx  initrd_end=0x%lx\n",
	    initrd_start, initrd_end);

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear the initrd reference.
	 */
	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_SMP

/**
 * setup_cpu_maps - initialize the following cpu maps:
 *                  cpu_possible_map
 *                  cpu_present_map
 *                  cpu_sibling_map
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_map as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
void __init smp_setup_cpu_maps(void)
{
	struct device_node *dn = NULL;
	int cpu = 0;

	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
		const int *intserv;
		int j, len = sizeof(u32), nthreads = 1;

		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
					  &len);
		if (intserv)
			nthreads = len / sizeof(int);
		else {
			intserv = of_get_property(dn, "reg", NULL);
			if (!intserv)
				intserv = &cpu;	/* assume logical == phys */
		}

		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
			cpu_set(cpu, cpu_present_map);
			set_hard_smp_processor_id(cpu, intserv[j]);
			cpu_set(cpu, cpu_possible_map);
			cpu++;
		}
	}

#ifdef CONFIG_PPC64
	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		const unsigned int *ireg;

		num_addr_cell = of_n_addr_cells(dn);
		num_size_cell = of_n_size_cells(dn);

		ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);

		if (!ireg)
			goto out;

		maxcpus = ireg[num_addr_cell + num_size_cell];

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= 2;

		if (maxcpus > NR_CPUS) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %d.\n",
			       maxcpus, NR_CPUS);
			maxcpus = NR_CPUS;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			cpu_set(cpu, cpu_possible_map);
	out:
		of_node_put(dn);
	}

	vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
}

/*
 * Because cpu_sibling_map is now a per_cpu array, it cannot be initialized
 * until the per_cpu areas have been created.  This function is therefore
 * called from setup_per_cpu_areas().
 */
void __init smp_setup_cpu_sibling_map(void)
{
#if defined(CONFIG_PPC64)
	int cpu;

	/*
	 * Do the sibling map; assume only two threads per processor.
	 */
	for_each_possible_cpu(cpu) {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
		if (cpu_has_feature(CPU_FTR_SMT))
			cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu));
	}
#endif /* CONFIG_PPC64 */
}
#endif /* CONFIG_SMP */
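
/*
 * Register a platform device for the legacy PC speaker when the device
 * tree advertises one (PNP id PNP0100, i.e. a "pnpPNP,100" compatible node).
 */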
static __init int add_pcspkr(void)
{
	struct device_node *np;
	struct platform_device *pd;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
	of_node_put(np);
	if (!np)
		return -ENODEV;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
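
/*
 * Select the machine description: candidates are collected (via the
 * define_machine() macro) in a dedicated linker section bounded by
 * __machine_desc_start/__machine_desc_end, and the first one whose
 * probe() hook accepts the current machine wins.
 */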
void probe_machine(void)
{
	extern struct machdep_calls __machine_desc_start;
	extern struct machdep_calls __machine_desc_end;

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type ...\n");

	for (machine_id = &__machine_desc_start;
	     machine_id < &__machine_desc_end;
	     machine_id++) {
		DBG("  %s ...", machine_id->name);
		memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
		if (ppc_md.probe()) {
			DBG(" match !\n");
			break;
		}
		DBG("\n");
	}

	/* What can we do if we didn't find ? */
	if (machine_id >= &__machine_desc_end) {
		DBG("No suitable machine found !\n");
		for (;;);
	}

	printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
}

/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
	struct device_node *parent, *np = NULL;
	int ret = -ENODEV;

	switch (base_port) {
	case I8042_DATA_REG:
		if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
			np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
		if (np) {
			parent = of_get_parent(np);
			of_node_put(np);
			np = parent;
			break;
		}
		np = of_find_node_by_type(NULL, "8042");
		/* Pegasos has no device_type on its 8042 node, look for the
		 * name instead */
		if (!np)
			np = of_find_node_by_name(NULL, "8042");
		break;
	case FDC_BASE: /* FDC1 */
		np = of_find_node_by_type(NULL, "fdc");
		break;
#ifdef CONFIG_PPC_PREP
	case _PIDXR:
	case _PNPWRP:
	case PNPBIOS_BASE:
		/* implement me */
#endif
	default:
		/* ipmi is supposed to fail here */
		break;
	}
	if (!np)
		return ret;
	parent = of_get_parent(np);
	if (parent) {
		if (strcmp(parent->type, "isa") == 0)
			ret = 0;
		of_node_put(parent);
	}
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);
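
/*
 * Panic notifier: hand the panic message to the platform's ppc_md.panic
 * hook (only meaningful on platforms that provide one, e.g. to report the
 * panic to firmware).
 */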
static int ppc_panic_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	ppc_md.panic(ptr);	/* May not return */
	return NOTIFY_DONE;
}

static struct notifier_block ppc_panic_block = {
	.notifier_call = ppc_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};

void __init setup_panic(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}

#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency.  This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree.  Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */

#ifdef CONFIG_NOT_COHERENT_CACHE
#define KERNEL_COHERENCY	0
#else
#define KERNEL_COHERENCY	1
#endif

static int __init check_cache_coherency(void)
{
	struct device_node *np;
	const void *prop;
	int devtree_coherency;

	np = of_find_node_by_path("/");
	prop = of_get_property(np, "coherency-off", NULL);
	of_node_put(np);

	devtree_coherency = prop ? 0 : 1;

	if (devtree_coherency != KERNEL_COHERENCY) {
		printk(KERN_ERR
			"kernel coherency:%s != device tree coherency:%s\n",
			KERNEL_COHERENCY ? "on" : "off",
			devtree_coherency ? "on" : "off");
		BUG();
	}

	return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */
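
/* Root of the powerpc-specific debugfs hierarchy (normally visible at
 * /sys/kernel/debug/powerpc once debugfs is mounted). */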
#ifdef CONFIG_DEBUG_FS
struct dentry *powerpc_debugfs_root;

static int powerpc_debugfs_init(void)
{
	powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);

	return powerpc_debugfs_root == NULL;
}
arch_initcall(powerpc_debugfs_init);
#endif