/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cputype.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
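/*
 * Note: fpe_setup() above only records the string passed via "fpe=";
 * the floating point emulator itself (e.g. nwfpe) inspects fpe_type[]
 * when it initialises to decide whether it should activate.
 */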
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
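/*
 * cpu_architecture() below decodes the CPUID main ID register:
 * pre-ARM7 parts have an implementor-specific layout, ARM7 parts carry
 * 0x7 in bits [15:12], older post-ARM7 parts encode the architecture
 * in bits [18:16], and 0xf in bits [19:16] means "consult the CPUID
 * feature registers" (MMFR0 here).
 */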
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
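/*
 * cacheid_init() below classifies the caches once at boot: the D-cache
 * as VIVT, VIPT aliasing or VIPT non-aliasing, and the I-cache as ASID
 * tagged or aliasing where that differs.  The cache maintenance and
 * page flushing code keys off these bits via the cache_is_*() and
 * icache_is_*() helpers.
 */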
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
static void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
static struct machine_desc * __init lookup_machine_type(unsigned int type)
{
	extern struct machine_desc __arch_info_begin[], __arch_info_end[];
	struct machine_desc *p;

	for (p = __arch_info_begin; p < __arch_info_end; p++)
		if (type == p->nr)
			return p;

	early_print("\n"
		"Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
		"Available machine support:\n\nID (hex)\tNAME\n", type);

	for (p = __arch_info_begin; p < __arch_info_end; p++)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
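/*
 * Worked example for the alignment above: start = 0x1234, size =
 * 0x10000 with 4K pages gives size -= 0x234 (0xfdcc), bank->start =
 * 0x2000 and bank->size = 0xf000, so the partial pages at either end
 * are dropped rather than reported as usable RAM.
 */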
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}
static void __init
request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
/*
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
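/*
 * Each tag is a struct tag: a tag_header (u32 size in words, including
 * the header, and a u32 tag id) followed by the tag-specific payload,
 * laid out as:
 *
 *   ATAG_CORE | ATAG_MEM | ... | ATAG_NONE (size 0)
 *
 * tag_next() advances by hdr.size words, and the parsers below are
 * collected into the tag table by the __tagtable() macro.
 */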
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
#ifdef CONFIG_BLK_DEV_RAM
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
#endif
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
static int __init parse_tag_cmdline(const struct tag *tag)
{
#ifndef CONFIG_CMDLINE_FORCE
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
#else
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#endif /* CONFIG_CMDLINE_FORCE */
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}
/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
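/*
 * init_tags above is the fallback tag list used when the bootloader
 * passes nothing usable: a single memory bank of MEM_SIZE bytes
 * starting at PHYS_OFFSET, plus core-tag defaults (read-only root,
 * PAGE_SIZE page size, rootdev 0xff), terminated by ATAG_NONE.
 */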
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of the elf core header stored by the
 * crashed kernel. This option will be passed by the kexec loader to the
 * capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */
static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}
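/*
 * squash_mem_tags() above is used by setup_arch() below when the
 * machine's fixup callback has already filled in meminfo: any ATAG_MEM
 * entries are turned into ATAG_NONE so that parse_tags() does not add
 * the same memory banks a second time.
 */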
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};
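/*
 * The order of hwcap_str must match the HWCAP_* bit numbers: c_show()
 * below prints entry i whenever bit i of elf_hwcap is set, and stops
 * at the NULL terminator.
 */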
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
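/*
 * cpuinfo_op is the seq_file interface consumed by fs/proc/cpuinfo.c;
 * reading /proc/cpuinfo ends up in c_show() above.
 */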