/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
#endif
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
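
/*
 * Note on the endian_test union above: casting the unsigned long
 * member to char yields its least significant byte, which on 32-bit
 * ARM lives at the lowest address of the union, so ENDIANNESS
 * evaluates to 'l' on a little-endian kernel and 'b' on a big-endian
 * one.  setup_processor() appends this character to the utsname
 * machine string and to elf_platform, yielding strings such as
 * "armv5tel".
 */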
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
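
/*
 * How the fields above are combined by dump_cache() and c_show_cache()
 * below: with mult = 2 + CACHE_M, the cache size is mult << (8 + SIZE)
 * bytes, the associativity is (mult << ASSOC) >> 1, the line length is
 * 8 << LINE bytes, and the number of sets is
 * 1 << (6 + SIZE - ASSOC - LINE).
 */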
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else {
		/* the revised CPUID */
		cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	}

	return cpu_arch;
}
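
/*
 * Summary of the decode above: IDs with bits 19 and 15:12 all clear
 * (pre-ARM7 style) are reported as unknown; the ARM7 pattern uses
 * bit 23 to separate ARMv4T from ARMv3; IDs with bit 19 clear carry
 * an architecture number in bits 18:16, offset from ARMv3; anything
 * else is treated as the revised CPUID scheme and mapped from bits
 * 15:12 onto CPU_ARCH_ARMv6 and above.
 */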
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	cpu_proc_init();
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
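	/*
	 * What the inline assembly below does: each "msr cpsr_c" switches
	 * the CPU into IRQ, abort or undefined mode with IRQs and FIQs
	 * masked, the following "add sp" points that mode's banked stack
	 * pointer at the matching three-word array in this CPU's struct
	 * stack, and the final "msr cpsr_c" drops back into SVC mode.
	 */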
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	bank = &meminfo.bank[meminfo.nr_banks++];

	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);
/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
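
/*
 * In short: each time a new word starts, parse_cmdline() checks it
 * against the __early_param table (e.g. "mem=" and "initrd=" above),
 * runs the matching handler immediately and strips that option;
 * everything else is copied verbatim into command_line, which is what
 * *cmdline_p ends up pointing at.
 */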
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
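
/*
 * The defaults above describe a minimal tag list: an ATAG_CORE header,
 * one ATAG_MEM bank of MEM_SIZE bytes starting at PHYS_OFFSET, and a
 * zero-length ATAG_NONE terminator.  setup_arch() falls back to this
 * list when neither __atags_pointer nor mdesc->boot_params supplies a
 * usable tag list.
 */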
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	NULL
};
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
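
	/*
	 * The BogoMIPS arithmetic above prints loops_per_jiffy * HZ / 500000
	 * as a fixed-point value: dividing by (500000/HZ) gives the integer
	 * part, and dividing by (5000/HZ) modulo 100 gives the two decimal
	 * places.
	 */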
	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};