Linux 4.1.18
linux/fpc-iii.git: arch/arm/kernel/setup.c
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
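/*
 * Example: on a little-endian kernel the low byte of endian_test.l is the
 * 'l' stored first in memory, so ENDIANNESS evaluates to 'l'; on a
 * big-endian kernel it is 'b'.  setup_processor() appends this character
 * to the utsname machine and ELF platform strings (e.g. "v7l" vs "v7b").
 */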
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
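		/*
		 * Example with illustrative CCSIDR values: a LineSize field
		 * of 1 means 4 << (1 + 2) = 32 bytes per line, and a NumSets
		 * field of 511 means 512 sets, i.e. a 16 KiB way.  With
		 * 4 KiB pages that exceeds PAGE_SIZE, so the VIPT I-cache
		 * can alias; 128 sets (a 4 KiB way) would not.
		 */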
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
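			/*
			 * CTR.L1Ip (bits [15:14]) describes the L1 I-cache
			 * policy: 0b01 is ASID-tagged VIVT, 0b10 is VIPT and
			 * 0b11 is PIPT, hence the two cases below.
			 */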
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;
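	/*
	 * The 4-bit ID_ISAR0[27:24] "Divide" field encodes hardware divide
	 * support: 1 means SDIV/UDIV in the Thumb instruction set only,
	 * 2 means the ARM instruction set as well, which is why block >= 2
	 * also sets HWCAP_IDIVA above.
	 */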

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}
static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
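	/*
	 * Each exception mode (IRQ, ABT, UND, FIQ) has a banked SP.  The
	 * sequence below briefly switches CPSR into each mode, points the
	 * banked SP at the small three-word per-CPU stack declared above,
	 * and finally returns to SVC mode; the exception entry code only
	 * needs these few words to stash registers before moving to the
	 * SVC stack.
	 */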
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;
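	/*
	 * Example: if the boot CPU's MPIDR Aff0 is 2, logical CPU 0 maps to
	 * physical 2 and logical 2 maps to physical 0, while every other
	 * logical CPU keeps an identity mapping.
	 */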

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/*
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
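	/*
	 * Example with illustrative MPIDRs: if Aff0 takes values 0-3 and
	 * Aff1 is 0 or 1, then fs[0] = 0, bits[0] = 2, fs[1] = 0 and
	 * bits[1] = 1, so shift_aff[1] = 8 + 0 - 2 = 6: the Aff1 bit at
	 * MPIDR position 8 is shifted down next to the two Aff0 bits,
	 * packing the eight CPUs into a dense 3-bit index without collisions.
	 */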
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}
void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;
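	/*
	 * Example with 4 KiB pages: start = 0x60000800, size = 0x100000
	 * becomes aligned_start = 0x60001000 with size reduced by 0x800;
	 * the size itself is truncated to a whole number of pages further
	 * below.
	 */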

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
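/*
 * For example, "mem=512M@0x80000000" registers 512 MiB starting at
 * physical address 0x80000000, while a bare "mem=64M" starts the region
 * at PHYS_OFFSET.
 */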
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end   = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end   = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif
static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/*
 * reserve_crashkernel() - reserves memory area for the crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
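/*
 * For example, booting with "crashkernel=64M@0x60000000" requests a 64 MiB
 * reservation at physical address 0x60000000 for the dump capture kernel
 * loaded later via kexec.
 */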
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
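		/*
		 * Example: with HZ = 100 and loops_per_jiffy = 1253376 the
		 * expressions above print "250.67", i.e. roughly
		 * loops_per_jiffy * HZ / 500000 BogoMIPS.
		 */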
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};