generic: add __FINITDATA
[wrt350n-kernel.git] / arch/arm/kernel/setup.c
blob d3941a7b0455cf632cc3ae066ade8fbad7124f2c
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"
#include "atags.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
#endif
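/*
 * Small per-CPU stacks for the IRQ, abort and undefined-instruction
 * exception modes; cpu_init() below points each mode's banked stack
 * pointer at its slot in this structure.
 */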
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
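/*
 * ENDIANNESS evaluates to 'l' on a little-endian build (the low byte of
 * endian_test.l is c[0]) and 'b' on a big-endian one; it is appended to
 * the machine and ELF platform names in setup_processor().
 */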
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
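/*
 * The macros above decode the cache type register returned by
 * read_cpuid(CPUID_CACHETYPE): CACHE_TYPE/CACHE_S give the cache class
 * and the separate (Harvard) I/D bit, while CACHE_SIZE, CACHE_ASSOC,
 * CACHE_M and CACHE_LINE pick apart one cache's geometry field as
 * extracted by CACHE_DSIZE()/CACHE_ISIZE().
 */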

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}
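/*
 * cpu_architecture() derives the architecture version from the main ID
 * register: older parts encode it directly (with special cases for
 * pre-ARM7 and ARM7 designs), while an architecture field of 0xf marks
 * the newer CPUID scheme, where MMFR0 must be consulted to distinguish
 * ARMv6 from ARMv7.
 */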
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	cpu_proc_init();
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers: switch briefly
	 * into IRQ, abort and undefined mode (with interrupts masked) to
	 * load each mode's banked stack pointer, then drop back to SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	bank = &meminfo.bank[meminfo.nr_banks++];

	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);
}
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]".
 */
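/*
 * For example, "mem=64M@0x20000000" registers a single 64MB bank starting
 * at physical address 0x20000000; repeating mem= adds further banks.
 */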
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);
/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines will never have lp0, lp1 or lp2, so only
	 * reserve them when the machine descriptor asks for it.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
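/*
 * Each tag is a struct tag: a tag_header (size in words, including the
 * header, plus the ATAG_* id) followed by a tag-specific payload, so
 * tag_next() can walk the list without knowing every tag type.
 */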
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
	       "please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
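/*
 * init_tags describes one default memory bank of MEM_SIZE at PHYS_OFFSET;
 * setup_arch() falls back to it when the boot loader provides neither a
 * valid tag list nor a convertible param_struct.
 */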

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	NULL
};

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};