/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(const struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
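
/*
 * Per-CPU exception stacks: three words each for the IRQ, abort and
 * undefined-instruction modes.
 */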
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;
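
/*
 * Compile-time endianness probe: reading the union's word back as a char
 * yields 'l' on little-endian builds and 'b' on big-endian ones.
 */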
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
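
/*
 * Derive the architecture version from the CPUID layout: pre-CPUID cores
 * are matched on fixed ID fields, while the revised (0xf) format is
 * classified via the VMSA/PMSA fields of the memory model feature register.
 */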
#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
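
/*
 * Determine the D-cache and I-cache types from the cache type register;
 * bits [31:29] == 0b100 select the ARMv7 register layout.
 */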
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs, vmsa;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
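		/* fall through: ARM-mode divide implies Thumb-mode divide */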
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}

	/* LPAE implies atomic ldrd/strd instructions */
	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
	if (vmsa >= 5)
		elf_hwcap |= HWCAP_LPAE;
}

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}
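
/* Map of logical CPU numbers to the physical MPIDR affinity values. */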
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
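
	/*
	 * The booting CPU becomes logical CPU 0; whichever CPU previously
	 * occupied that slot takes over the booting CPU's old index.
	 */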
	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;

#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
	u64 aligned_start;

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
		       "32-bit physical address space\n", (long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	bank->start = aligned_start;
	bank->size  = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size after
	 * page alignment.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
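
/*
 * Register the System RAM regions and the kernel text/data (plus any
 * machine-provided video RAM and legacy port ranges) with the resource
 * tree, so they show up in /proc/iomem and /proc/ioports.
 */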
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
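
/* Comparison helper for sort(): order memory banks by ascending start pfn. */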
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
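
/*
 * Register a device for each possible CPU so that they appear under
 * /sys/devices/system/cpu.
 */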
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
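
/* Feature strings for /proc/cpuinfo, indexed by HWCAP_* bit position. */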
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
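
/*
 * Minimal seq_file iterator: the sequence has a single element, and c_show
 * emits every online CPU in one pass.
 */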
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};