/*
 * arch/arm64/kernel/setup.c
 *
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>
#include <linux/psci.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
		(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
		 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
		 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
		 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
		 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
		 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
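
/*
 * Editorial note: kernel_code and kernel_data above are simple aliases for
 * the two mem_res[] slots. Their start/end fields are left as zero here and
 * are filled in from the _text/_etext/_sdata/_end linker symbols by
 * request_standard_resources() below.
 */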

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];
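
/*
 * Editorial note: boot_args[] is stashed by the early assembly entry code
 * with the values of x0..x3 at kernel entry. Per the arm64 boot protocol
 * (Documentation/arm64/booting.txt), x0 carries the physical address of the
 * device tree blob and x1-x3 are reserved as zero; setup_arch() below warns
 * when x1-x3 are found to be nonzero.
 */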

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;

/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
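
/*
 * Worked example (editorial, values hypothetical): on a two-cluster system
 * whose CPUs report MPIDR_EL1 affinity values 0x0, 0x1, 0x100 and 0x101,
 * the XOR pre-scan gives mask = 0x101, i.e. one live bit at Aff0
 * (fs[0] = 0, bits[0] = 1) and one at Aff1 (fs[1] = 0, bits[1] = 1).
 * The shifts then come out as shift_aff[0] = 0 and shift_aff[1] = 7, so a
 * consumer of mpidr_hash (e.g. the cpu_resume path) can compute
 *
 *	index = ((mpidr & 0xff) >> 0) | ((mpidr & 0xff00) >> 7);
 *
 * which packs the four CPUs into the collision-free indices 0..3 with
 * mpidr_hash.bits = 2.
 */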

static void __init setup_processor(void)
{
	u64 features;
	s64 block;
	u32 cwg;
	int cls;

	printk("CPU: AArch64 Processor [%08x] revision %d\n",
	       read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = cpuid_feature_extract_field(features, 4);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
		case 1:
			elf_hwcap |= HWCAP_AES;
		case 0:
			break;
		}
	}

	if (cpuid_feature_extract_field(features, 8) > 0)
		elf_hwcap |= HWCAP_SHA1;

	if (cpuid_feature_extract_field(features, 12) > 0)
		elf_hwcap |= HWCAP_SHA2;

	if (cpuid_feature_extract_field(features, 16) > 0)
		elf_hwcap |= HWCAP_CRC32;

	block = cpuid_feature_extract_field(features, 20);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_ATOMICS;
		case 1:
			/* RESERVED */
		case 0:
			break;
		}
	}

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the AArch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = cpuid_feature_extract_field(features, 4);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
		case 0:
			break;
		}
	}

	if (cpuid_feature_extract_field(features, 8) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	if (cpuid_feature_extract_field(features, 12) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	if (cpuid_feature_extract_field(features, 16) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
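
/*
 * Editorial note: the ID register fields tested above are signed 4-bit
 * values and cpuid_feature_extract_field() sign-extends them, so a reserved
 * encoding such as 0xf is seen as -1 and fails every "> 0" check. For
 * example, ID_AA64ISAR0_EL1[7:4] == 0x2 yields block == 2, which falls
 * through the switch above and sets both HWCAP_PMULL and HWCAP_AES.
 */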

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	void *dt_virt = fixmap_remap_fdt(dt_phys);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end   = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end   = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end   = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
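
/*
 * Editorial note: the nested request_resource() calls above produce the
 * familiar /proc/iomem layout, for instance (addresses hypothetical):
 *
 *	40000000-7fffffff : System RAM
 *	  40080000-40ffffff : Kernel code
 *	  41080000-4157ffff : Kernel data
 */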

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Relocate initrd if it is not completely within the linear mapping.
 * This would be the case if mem= cuts out all or part of it.
 */
static void __init relocate_initrd(void)
{
	phys_addr_t orig_start = __virt_to_phys(initrd_start);
	phys_addr_t orig_end = __virt_to_phys(initrd_end);
	phys_addr_t ram_end = memblock_end_of_DRAM();
	phys_addr_t new_start;
	unsigned long size, to_free = 0;
	void *dest;

	if (orig_end <= ram_end)
		return;

	/*
	 * Any of the original initrd which overlaps the linear map should
	 * be freed after relocating.
	 */
	if (orig_start < ram_end)
		to_free = ram_end - orig_start;

	size = orig_end - orig_start;

	/* initrd needs to be relocated completely inside linear mapping */
	new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
					   size, PAGE_SIZE);
	if (!new_start)
		panic("Cannot relocate initrd of size %ld\n", size);
	memblock_reserve(new_start, size);

	initrd_start = __phys_to_virt(new_start);
	initrd_end   = initrd_start + size;

	pr_info("Moving initrd from [%llx-%llx] to [%llx-%llx]\n",
		orig_start, orig_start + size - 1,
		new_start, new_start + size - 1);

	dest = (void *)initrd_start;

	if (to_free) {
		memcpy(dest, (void *)__phys_to_virt(orig_start), to_free);
		dest += to_free;
	}

	copy_from_early_mem(dest, orig_start + to_free, size - to_free);

	if (to_free) {
		pr_info("Freeing original RAMDISK from [%llx-%llx]\n",
			orig_start, orig_start + to_free - 1);
		memblock_free(orig_start, to_free);
	}
}
#else
static inline void __init relocate_initrd(void)
{
}
#endif
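
/*
 * Worked example (editorial, sizes hypothetical): if mem= clamps the linear
 * map so that ram_end = 0x80000000 while the bootloader placed a 4 MiB
 * initrd at 0x7ff00000, then orig_end > ram_end triggers relocation and
 * to_free = 1 MiB. That first megabyte is already mapped and is memcpy()'d
 * directly; the remaining 3 MiB above ram_end are pulled in via
 * copy_from_early_mem(), which can access memory outside the linear map.
 * Finally, the overlapping 1 MiB is handed back to memblock.
 */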

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up possible earlycon.
	 * (Report possible System Errors once we can report this occurred)
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	paging_init();
	relocate_initrd();
	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled) {
		unflatten_device_tree();
		psci_dt_init();
	} else {
		psci_acpi_init();
	}
	xen_early_init();

	cpu_read_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

static int __init arm64_device_init(void)
{
	if (of_have_populated_dt()) {
		of_iommu_init();
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
	} else if (acpi_disabled) {
		pr_crit("Device tree not populated\n");
	}
	return 0;
}
arch_initcall_sync(arm64_device_init);

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"atomics",
	NULL
};
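
/*
 * Editorial note: the order of the strings above must match the bit
 * positions of the HWCAP_* flags (HWCAP_FP is bit 0, HWCAP_ASIMD bit 1,
 * and so on), since c_show() below prints the j-th string when bit j is
 * set in elf_hwcap.
 */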

#ifdef CONFIG_COMPAT
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL	/* sentinel: the c_show() loop below iterates until NULL */
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}
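
/*
 * Sample output (editorial, illustrative of one online Cortex-A57 CPU):
 *
 *	processor	: 0
 *	Features	: fp asimd evtstrm aes pmull sha1 sha2 crc32
 *	CPU implementer	: 0x41
 *	CPU architecture: 8
 *	CPU variant	: 0x0
 *	CPU part	: 0xd07
 *	CPU revision	: 1
 */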

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}
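
/*
 * Editorial note: this iterator deliberately yields exactly one record --
 * c_start() hands out a token only for *pos == 0 and c_next() always
 * returns NULL -- so c_show() runs once per read of /proc/cpuinfo and
 * walks the online CPUs itself, rather than producing one record per CPU.
 */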
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};