linux-2.6/next.git: arch/arm/kernel/setup.c (at the merge of 's5p/for-next')
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/system.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
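
/*
 * Note on the ENDIANNESS trick above: endian_test stores the bytes
 * 'l','?','?','b', and the macro reads the union back as a long cast to
 * char, i.e. whichever byte lands in the least-significant position.  On a
 * little-endian kernel that is 'l', on a big-endian kernel 'b'; the character
 * is appended to the utsname machine string and to elf_platform (giving
 * strings such as "armv7l" / "v7l") in setup_processor() below.
 */
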
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
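
/*
 * Reading __get_cpu_architecture() above: the main ID register returned by
 * read_cpuid_id() distinguishes the old fixed CPUID layouts from the revised
 * one.  An ARMv7 part, for example, typically reports 0xf in bits [19:16],
 * which selects the "revised CPUID format" branch, and ID_MMFR0 is then used
 * to tell VMSAv7/PMSAv7 (ARMv7) apart from ARMv6.
 */
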
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
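
/*
 * On ARMv7 the check above derives the size of one I-cache way from CCSIDR:
 * line_size = 4 << (LineSize + 2) bytes, num_sets = NumSets + 1.  If a single
 * way (line_size * num_sets) exceeds PAGE_SIZE, virtual index bits lie above
 * the page offset and the I-cache can alias.  For example, 32-byte lines with
 * 256 sets give an 8KB way, which aliases with 4KB pages, whereas 128 sets
 * give a 4KB way, which does not.
 */
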
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
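
/*
 * The inline asm above steps the CPU through IRQ, ABT and UND modes in turn
 * (with IRQs and FIQs masked), points each mode's banked stack pointer at the
 * matching three-word area of this CPU's struct stack, and finally drops back
 * to SVC mode.  The PLC constraint is "I" (immediate) for ARM builds but "r"
 * (register) for Thumb-2 kernels, where msr with an immediate operand is not
 * allowed.
 */
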
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
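
/*
 * Example of the rounding above: a bank passed in with start=0x80000100 and
 * size=0x100000 first has the sub-page offset removed from the size
 * (0x100000 - 0x100 = 0xfff00), then start is rounded up to 0x80001000 and
 * the size rounded down to 0xff000, so only whole pages are registered.
 */
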
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
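
/*
 * Usage example: "mem=64M" registers a single 64MB bank starting at
 * PHYS_OFFSET, while "mem=64M@0x80000000" places it at physical 0x80000000;
 * memparse() accepts the usual K/M (and G) suffixes.  The first mem= option
 * seen also discards any banks registered earlier (the usermem latch above).
 */
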
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
static int __init parse_tag_mem64(const struct tag *tag)
{
	/* We only use 32-bits for the size. */
	return arm_add_memory(tag->u.mem64.start, (unsigned long)tag->u.mem64.size);
}

__tagtable(ATAG_MEM64, parse_tag_mem64);
#endif /* CONFIG_PHYS_ADDR_T_64BIT */

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },
	{ 0, ATAG_NONE }
};
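
/*
 * The defaults above form a minimal tag list: an ATAG_CORE (flags = 1,
 * pagesize = PAGE_SIZE, rootdev = 0xff), a single ATAG_MEM bank of MEM_SIZE
 * (16MB unless overridden) whose start is filled in with PHYS_OFFSET by
 * setup_machine_tags(), and a zero-length ATAG_NONE terminator.
 */
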
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}

static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
		/*
		 * We still are executing with a minimal MMU mapping created
		 * with the presumption that the machine default for this
		 * is located in the first MB of RAM.  Anything else will
		 * fault and silently hang the kernel at this point.
		 */
		if (mdesc->boot_params < PHYS_OFFSET ||
		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
			printk(KERN_WARNING
			       "Default boot params at physical 0x%08lx out of reach\n",
			       mdesc->boot_params);
		} else
#endif
		{
			tags = phys_to_virt(mdesc->boot_params);
		}
	}

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		tags = (struct tag *)&init_tags;
	}

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}

void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	unwind_init();

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk        = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		extern unsigned long arm_dma_zone_size;
		arm_dma_zone_size = mdesc->dma_zone_size;
	}
#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
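
/*
 * Rough boot order as implemented above: CPU/proc_info setup, machine
 * identification (flattened device tree first, ATAGs as the fallback),
 * command line handling, meminfo/memblock setup, paging_init(), resource
 * registration, device tree unflattening, then the optional SMP, crashkernel
 * and TCM setup, console selection, early_trap_init() and the machine's
 * init_early() hook.
 */
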
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
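
	/*
	 * BogoMIPS formatting above: the value printed is loops_per_jiffy *
	 * HZ / 500000, split into an integer part and a two-digit fraction.
	 * For example, with HZ=100 and loops_per_jiffy=4997120 this prints
	 * "999.42".
	 */
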
	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);
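
	/*
	 * Each set bit i in elf_hwcap selects hwcap_str[i], so the Features
	 * line mirrors the HWCAP_* bit layout (bit 0 "swp", bit 1 "half",
	 * bit 2 "thumb", and so on); the same mask is what userspace sees via
	 * the ELF auxiliary vector (AT_HWCAP).
	 */
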
	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};