/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);

#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;
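
/*
 * Note on sizing (based on how the exception vector stubs use these stacks):
 * each exception mode gets only a three-word stack, roughly enough to stash
 * r0, the banked lr and spsr before the handler switches to SVC mode.  These
 * are not general-purpose kernel stacks.
 */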

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
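
/*
 * (char)endian_test.l reads the least-significant byte of the word that
 * shares storage with c[]: on a little-endian kernel that is the first
 * byte ('l'), on a big-endian kernel the last byte ('b').  The character
 * is appended to the machine/ELF platform strings, e.g. "armv7l".
 */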

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);

		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
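		/*
		 * line_size * num_sets is the size of one cache way.  A way
		 * larger than a page means some index bits come from virtual
		 * address bits above the page offset, so a VIPT I-cache can
		 * alias.  Illustrative CCSIDR decode (not a specific CPU):
		 * LineSize field 1 -> 4 << 3 = 32 bytes/line, NumSets field
		 * 255 -> 256 sets, 32 * 256 = 8KB > 4KB page -> aliasing.
		 */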
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
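
	/*
	 * At this point each EABI helper has been overwritten with two
	 * instructions, conceptually:
	 *
	 *	udiv/sdiv	r0, r0, r1
	 *	bx		lr
	 *
	 * so division calls no longer fall through to the software routine.
	 */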
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;
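
	/*
	 * Illustrative example: if the boot CPU's MPIDR Aff0 is 2, logical
	 * CPU 0 maps to physical 2 and logical CPU 2 maps to physical 0;
	 * all other entries stay identity-mapped.  The booting CPU is thus
	 * always logical CPU 0.
	 */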

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/*
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24-bit value space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
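	/*
	 * Illustrative example (hypothetical topology): MPIDRs {0x0000,
	 * 0x0001, 0x0100, 0x0101} give mask = 0x101, bits[0] = bits[1] = 1,
	 * fs[0] = fs[1] = 0, so shift_aff[0] = 0, shift_aff[1] = 8 - 1 = 7
	 * and the hash uses 2 bits (4 buckets); MPIDR 0x0101 then hashes to
	 * (0x1 >> 0) | (0x100 >> 7) = 3.
	 */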
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

/*
 * locate processor in the list of supported processor types.  The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;
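
	/*
	 * Worked example with illustrative values: start = 0x60000800 and
	 * size = 0x100000 give aligned_start = 0x60001000 and size =
	 * 0xff800; the page masking further below then trims size to
	 * 0xff000.
	 */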

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);
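
	/*
	 * For example (illustrative values), "mem=64M@0x80000000" yields
	 * size = 64MB and start = 0x80000000; with no "@start" part, start
	 * stays at PHYS_OFFSET.
	 */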

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_virt_alloc(sizeof(*res), 0);
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)
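
/*
 * The region itself comes from the "crashkernel=" command line option,
 * e.g. "crashkernel=64M" to let the kernel pick a base below lowmem, or
 * "crashkernel=64M@0x30000000" to request a fixed base (illustrative
 * values); see reserve_crashkernel() below.
 */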

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
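
/*
 * The tables below are indexed by HWCAP/HWCAP2 bit number: c_show() tests
 * bit j of elf_hwcap (or elf_hwcap2) against hwcap_str[j] (or hwcap2_str[j]),
 * so the string order must match the HWCAP_* definitions in <asm/hwcap.h>.
 */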

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
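		/*
		 * The two expressions above print BogoMIPS, i.e.
		 * loops_per_jiffy * HZ / 500000, as an integer part plus two
		 * decimal digits; e.g. loops_per_jiffy = 4980736 with
		 * HZ = 100 prints "996.14" (illustrative numbers).
		 */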
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};