/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/node.h>
#include <linux/cpu.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <linux/hugetlb.h>
#include <linux/start_kernel.h>
#include <linux/screen_info.h>
#include <linux/tick.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>
/* <linux/smp.h> doesn't provide this definition. */
#ifndef CONFIG_SMP
#define setup_max_cpus 1
#endif
static inline int ABS(int x) { return x >= 0 ? x : -x; }
/* Chip information */
char chip_model[64] __ro_after_init;

struct screen_info screen_info;

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
/* Information on the NUMA nodes that we compute early */
unsigned long node_start_pfn[MAX_NUMNODES];
unsigned long node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];

static unsigned long __initdata node_percpu[MAX_NUMNODES];
/*
 * per-CPU stack and boot info.
 */
DEFINE_PER_CPU(unsigned long, boot_sp) =
	(unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA;

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
#else
/*
 * The variable must be __initdata since it references __init code.
 * With CONFIG_SMP it is per-cpu data, which is exempt from validation.
 */
unsigned long __initdata boot_pc = (unsigned long)start_kernel;
#endif
/* Page frame index of end of lowmem on each controller. */
unsigned long node_lowmem_end_pfn[MAX_NUMNODES];

/* Number of pages that can be mapped into lowmem. */
static unsigned long __initdata mappable_physpages;

/* Data on which physical memory controller corresponds to which NUMA node */
int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
	__ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
	__ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);

/* Node number as a function of the high PA bits */
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __ro_after_init;
EXPORT_SYMBOL(highbits_to_node);
static unsigned int __initdata maxmem_pfn = -1U;
static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES-1] = -1U
};
static nodemask_t __initdata isolnodes;
#if defined(CONFIG_PCI) && !defined(__tilegx__)
enum { DEFAULT_PCI_RESERVE_MB = 64 };
static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
unsigned long __initdata pci_reserve_start_pfn = -1U;
unsigned long __initdata pci_reserve_end_pfn = -1U;
#endif
static int __init setup_maxmem(char *str)
{
	unsigned long long maxmem;

	if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
		return -EINVAL;

	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used to no more than %dMB\n",
		maxmem_pfn >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxmem", setup_maxmem);
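/*
 * Worked example of the conversion above (a sketch assuming the usual
 * tile geometry of 16MB huge pages, HPAGE_SHIFT == 24, and 4KB pages,
 * PAGE_SHIFT == 12): "maxmem=500M" is rounded down to 31 huge pages
 * (496MB), so maxmem_pfn becomes 31 << 12 == 126976 small pages.
 */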
static int __init setup_maxnodemem(char *str)
{
	char *endp;
	unsigned long long maxnodemem;
	long node;

	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
	if (node >= MAX_NUMNODES || *endp != ':')
		return -EINVAL;

	maxnodemem = memparse(endp+1, NULL);
	maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxnodemem", setup_maxnodemem);
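/*
 * Usage sketch: the argument syntax is "<node>:<size>", so
 * "maxnodemem=1:2G" caps controller 1 at 2GB (rounded down to a
 * huge-page multiple) while leaving the other controllers untouched.
 */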
struct memmap_entry {
	u64 addr;	/* start of memory segment */
	u64 size;	/* size of memory segment */
};
static struct memmap_entry memmap_map[64];
static int memmap_nr;
static void add_memmap_region(u64 addr, u64 size)
{
	if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}
	memmap_map[memmap_nr].addr = addr;
	memmap_map[memmap_nr].size = size;
	memmap_nr++;
}
static int __init setup_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" not valid on tile\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		add_memmap_region(start_at, mem_size);
	} else {
		if (mem_size == 0)
			return -EINVAL;
		maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
			(HPAGE_SHIFT - PAGE_SHIFT);
	}
	return *p == '\0' ? 0 : -EINVAL;
}
early_param("memmap", setup_memmap);
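/*
 * Usage sketch: "memmap=512M$0x80000000" marks the 512MB starting at
 * PA 0x80000000 as reserved via add_memmap_region(), while a bare
 * "memmap=1G" acts like "maxmem=1G"; the x86-style '@' and '#' forms
 * are rejected above.
 */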
static int __init setup_mem(char *str)
{
	return setup_maxmem(str);
}
early_param("mem", setup_mem);  /* compatibility with x86 */
static int __init setup_isolnodes(char *str)
{
	if (str == NULL || nodelist_parse(str, isolnodes) != 0)
		return -EINVAL;

	pr_info("Set isolnodes value to '%*pbl'\n",
		nodemask_pr_args(&isolnodes));
	return 0;
}
early_param("isolnodes", setup_isolnodes);
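/*
 * Usage sketch: "isolnodes=1-3" keeps controllers 1-3 out of the
 * normal lowmem/percpu layout; on a HIGHMEM build their memory is then
 * exposed only as highmem (see setup_pa_va_mapping() and
 * zone_sizes_init() below).
 */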
#if defined(CONFIG_PCI) && !defined(__tilegx__)
static int __init setup_pci_reserve(char* str)
{
	if (str == NULL || kstrtouint(str, 0, &pci_reserve_mb) != 0 ||
	    pci_reserve_mb > 3 * 1024)
		return -EINVAL;

	pr_info("Reserving %dMB for PCIE root complex mappings\n",
		pci_reserve_mb);
	return 0;
}
early_param("pci_reserve", setup_pci_reserve);
#endif
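/*
 * Usage sketch: "pci_reserve=128" grows the window kept free just
 * below 4GB for PCI BAR mappings from the 64MB default to 128MB;
 * values above 3072 (3GB) are rejected above.
 */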
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;

	/* See validate_va() for more on this test. */
	if ((long)_VMALLOC_START >= 0)
		early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
			    VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);
	return 0;
}
early_param("vmalloc", parse_vmalloc);
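/*
 * Example of the rounding above (a sketch assuming a 16MB PGDIR_SIZE):
 * "vmalloc=200M" is rounded up to the next pgdir boundary, i.e. a
 * 208MB VMALLOC_RESERVE; the early_panic() fires only if the enlarged
 * region would push _VMALLOC_START below the 2GB line.
 */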
#ifdef CONFIG_HIGHMEM
/*
 * Determine for each controller where its lowmem is mapped and how much of
 * it is mapped there.  On controller zero, the first few megabytes are
 * already mapped in as code at MEM_SV_START, so in principle we could
 * start our data mappings higher up, but for now we don't bother, to avoid
 * additional confusion.
 *
 * One question is whether, on systems with more than 768 Mb and
 * controllers of different sizes, to map in a proportionate amount of
 * each one, or to try to map the same amount from each controller.
 * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
 * respectively, do we map 256MB from each, or do we map 128 MB, 512
 * MB, and 128 MB respectively?)  For now we use a proportionate
 * solution like the latter.
 *
 * The VA/PA mapping demands that we align our decisions at 16 MB
 * boundaries so that we can rapidly convert VA to PA.
 */
static void *__init setup_pa_va_mapping(void)
{
	unsigned long curr_pages = 0;
	unsigned long vaddr = PAGE_OFFSET;
	nodemask_t highonlynodes = isolnodes;
	int i, j;

	memset(pbase_map, -1, sizeof(pbase_map));
	memset(vbase_map, -1, sizeof(vbase_map));

	/* Node zero cannot be isolated for LOWMEM purposes. */
	node_clear(0, highonlynodes);

	/* Count up the number of pages on non-highonlynodes controllers. */
	mappable_physpages = 0;
	for_each_online_node(i) {
		if (!node_isset(i, highonlynodes))
			mappable_physpages +=
				node_end_pfn[i] - node_start_pfn[i];
	}

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
		unsigned long size = end - start;
		unsigned long vaddr_end;

		if (node_isset(i, highonlynodes)) {
			/* Mark this controller as having no lowmem. */
			node_lowmem_end_pfn[i] = start;
			continue;
		}

		curr_pages += size;
		if (mappable_physpages > MAXMEM_PFN) {
			vaddr_end = PAGE_OFFSET +
				(((u64)curr_pages * MAXMEM_PFN /
				  mappable_physpages)
				 << PAGE_SHIFT);
		} else {
			vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
		}
		for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
			unsigned long this_pfn =
				start + (j << HUGETLB_PAGE_ORDER);
			pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
			if (vbase_map[__pfn_to_highbits(this_pfn)] ==
			    (void *)-1)
				vbase_map[__pfn_to_highbits(this_pfn)] =
					(void *)(vaddr & HPAGE_MASK);
		}
		node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
		BUG_ON(node_lowmem_end_pfn[i] > end);
	}

	/* Return highest address of any mapped memory. */
	return (void *)vaddr;
}
#endif /* CONFIG_HIGHMEM */
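/*
 * Worked example of the proportionate split above (a sketch assuming a
 * 768MB MAXMEM and 16MB huge pages): with controllers of 256MB, 1GB,
 * and 256MB, the curr_pages * MAXMEM_PFN / mappable_physpages scaling
 * yields lowmem allotments of 128MB, 512MB, and 128MB, each ending on
 * a 16MB boundary so that pbase_map[]/vbase_map[] lookups stay simple
 * shifts.
 */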
/*
 * Register our most important memory mappings with the debug stub.
 *
 * This is up to 4 mappings for lowmem, one mapping per memory
 * controller, plus one for our text segment.
 */
static void store_permanent_mappings(void)
{
	int i;

	for_each_online_node(i) {
		HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
		HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
#else
		HV_PhysAddr high_mapped_pa = node_end_pfn[i];
#endif
		unsigned long pages = high_mapped_pa - node_start_pfn[i];
		HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
		hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
	}

	hv_store_mapping((HV_VirtAddr)_text,
			 (uint32_t)(_einittext - _text), 0);
}
/*
 * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
 * and node_online_map, doing suitable sanity-checking.
 * Also set min_low_pfn, max_low_pfn, and max_pfn.
 */
static void __init setup_memory(void)
{
	int i, j;
	int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
#ifdef CONFIG_HIGHMEM
	long highmem_pages;
#endif
#ifndef __tilegx__
	int cap;
#endif
#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
	long lowmem_pages = 0;
#endif
	unsigned long physpages = 0;

	/* We are using a char to hold the cpu_2_node[] mapping */
	BUILD_BUG_ON(MAX_NUMNODES > 127);
	/* Discover the ranges of memory available to us */
	for (i = 0; ; ++i) {
		unsigned long start, size, end, highbits;
		HV_PhysAddrRange range = hv_inquire_physical(i);
		if (range.size == 0)
			break;
#ifdef CONFIG_FLATMEM
		if (i > 0) {
			pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
			       range.size, range.start + range.size);
			continue;
		}
#endif
#ifndef __tilegx__
		if ((unsigned long)range.start) {
			pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
		if ((range.start & (HPAGE_SIZE-1)) != 0 ||
		    (range.size & (HPAGE_SIZE-1)) != 0) {
			unsigned long long start_pa = range.start;
			unsigned long long orig_size = range.size;
			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
			range.size -= (range.start - start_pa);
			range.size &= HPAGE_MASK;
			pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
			       start_pa, start_pa + orig_size,
			       range.start, range.start + range.size);
		}
		highbits = __pa_to_highbits(range.start);
		if (highbits >= NR_PA_HIGHBIT_VALUES) {
			pr_err("PA high bits too high: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		if (highbits_seen[highbits]) {
			pr_err("Range overlaps in high bits: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		highbits_seen[highbits] = 1;
		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
			int max_size = maxnodemem_pfn[i];
			if (max_size > 0) {
				pr_err("Maxnodemem reduced node %d to %d pages\n",
				       i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxnodemem disabled node %d\n", i);
				continue;
			}
		}
		if (physpages + PFN_DOWN(range.size) > maxmem_pfn) {
			int max_size = maxmem_pfn - physpages;
			if (max_size > 0) {
				pr_err("Maxmem reduced node %d to %d pages\n",
				       i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxmem disabled node %d\n", i);
				continue;
			}
		}
		if (i >= MAX_NUMNODES) {
			pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
			       i, range.size, range.size + range.start);
			continue;
		}

		start = range.start >> PAGE_SHIFT;
		size = range.size >> PAGE_SHIFT;
		end = start + size;

#ifndef __tilegx__
		if (((HV_PhysAddr)end << PAGE_SHIFT) !=
		    (range.start + range.size)) {
			pr_err("PAs too high to represent: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
#if defined(CONFIG_PCI) && !defined(__tilegx__)
		/*
		 * Blocks that overlap the pci reserved region must
		 * have enough space to hold the maximum percpu data
		 * region at the top of the range.  If there isn't
		 * enough space above the reserved region, just
		 * truncate the node.
		 */
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			unsigned int per_cpu_size =
				__per_cpu_end - __per_cpu_start;
			unsigned int percpu_pages =
				NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
			if (end < pci_reserve_end_pfn + percpu_pages) {
				end = pci_reserve_start_pfn;
				pr_err("PCI mapping region reduced node %d to %ld pages\n",
				       i, end - start);
			}
		}
#endif

		for (j = __pfn_to_highbits(start);
		     j <= __pfn_to_highbits(end - 1); j++)
			highbits_to_node[j] = i;

		node_start_pfn[i] = start;
		node_end_pfn[i] = end;
		node_controller[i] = range.controller;
		physpages += size;
		max_pfn = end;

		/* Mark node as online */
		node_set(i, node_online_map);
		node_set(i, node_possible_map);
	}
#ifndef __tilegx__
	/*
	 * For 4KB pages, mem_map "struct page" data is 1% of the size
	 * of the physical memory, so can be quite big (640 MB for
	 * four 16G zones).  These structures must be mapped in
	 * lowmem, and since we currently cap out at about 768 MB,
	 * it's impractical to try to use this much address space.
	 * For now, arbitrarily cap the amount of physical memory
	 * we're willing to use at 8 million pages (32GB of 4KB pages).
	 */
	cap = 8 * 1024 * 1024;	/* 8 million pages */
	if (physpages > cap) {
		int num_nodes = num_online_nodes();
		int cap_each = cap / num_nodes;
		unsigned long dropped_pages = 0;
		for (i = 0; i < num_nodes; ++i) {
			int size = node_end_pfn[i] - node_start_pfn[i];
			if (size > cap_each) {
				dropped_pages += (size - cap_each);
				node_end_pfn[i] = node_start_pfn[i] + cap_each;
			}
		}
		physpages -= dropped_pages;
		pr_warn("Only using %ldMB memory - ignoring %ldMB\n",
			physpages >> (20 - PAGE_SHIFT),
			dropped_pages >> (20 - PAGE_SHIFT));
		pr_warn("Consider using a larger page size\n");
	}
#endif
	/* Heap starts just above the last loaded address. */
	min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);

#ifdef CONFIG_HIGHMEM
	/* Find where we map lowmem from each controller. */
	high_memory = setup_pa_va_mapping();

	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_lowmem_end_pfn[0];

	lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
		MAXMEM_PFN : mappable_physpages;
	highmem_pages = (long) (physpages - lowmem_pages);

	pr_notice("%ldMB HIGHMEM available\n",
		  pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
	pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages));
#else
	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_end_pfn[0];

#ifndef __tilegx__
	if (node_end_pfn[0] > MAXMEM_PFN) {
		pr_warn("Only using %ldMB LOWMEM\n", MAXMEM >> 20);
		pr_warn("Use a HIGHMEM enabled kernel\n");
		max_low_pfn = MAXMEM_PFN;
		max_pfn = MAXMEM_PFN;
		node_end_pfn[0] = MAXMEM_PFN;
	} else {
		pr_notice("%ldMB memory available\n",
			  pages_to_mb(node_end_pfn[0]));
	}
	for (i = 1; i < MAX_NUMNODES; ++i) {
		node_start_pfn[i] = 0;
		node_end_pfn[i] = 0;
	}
	high_memory = __va(node_end_pfn[0]);
#else
	for (i = 0; i < MAX_NUMNODES; ++i) {
		int pages = node_end_pfn[i] - node_start_pfn[i];
		lowmem_pages += pages;
		if (pages)
			high_memory = pfn_to_kaddr(node_end_pfn[i]);
	}
	pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages));
#endif
#endif
}
/*
 * On 32-bit machines, we only put bootmem on the low controller,
 * since PAs > 4GB can't be used in bootmem.  In principle one could
 * imagine, e.g., multiple 1 GB controllers all of which could support
 * bootmem, but in practice using controllers this small isn't a
 * particularly interesting scenario, so we just keep it simple and
 * use only the first controller for bootmem on 32-bit machines.
 */
static inline int node_has_bootmem(int nid)
{
#ifdef CONFIG_64BIT
	return 1;
#else
	return nid == 0;
#endif
}

static inline unsigned long alloc_bootmem_pfn(int nid,
					      unsigned long size,
					      unsigned long goal)
{
	void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
					 PAGE_SIZE, goal);
	unsigned long pfn = kaddr_to_pfn(kva);
	BUG_ON(goal && PFN_PHYS(pfn) != goal);
	return pfn;
}

static void __init setup_bootmem_allocator_node(int i)
{
	unsigned long start, end, mapsize, mapstart;

	if (node_has_bootmem(i)) {
		NODE_DATA(i)->bdata = &bootmem_node_data[i];
	} else {
		/* Share controller zero's bdata for now. */
		NODE_DATA(i)->bdata = &bootmem_node_data[0];
	}

	/* Skip up to after the bss in node 0. */
	start = (i == 0) ? min_low_pfn : node_start_pfn[i];

	/* Only lowmem, if we're a HIGHMEM build. */
#ifdef CONFIG_HIGHMEM
	end = node_lowmem_end_pfn[i];
#else
	end = node_end_pfn[i];
#endif

	/* No memory here. */
	if (end == start)
		return;

	/* Figure out where the bootmem bitmap is located. */
	mapsize = bootmem_bootmap_pages(end - start);
	if (i == 0) {
		/* Use some space right before the heap on node 0. */
		mapstart = start;
		start += mapsize;
	} else {
		/* Allocate bitmap on node 0 to avoid page table issues. */
		mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
	}

	/* Initialize a node. */
	init_bootmem_node(NODE_DATA(i), mapstart, start, end);

	/* Free all the space back into the allocator. */
	free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));

#if defined(CONFIG_PCI) && !defined(__tilegx__)
	/*
	 * Throw away any memory aliased by the PCI region.
	 */
	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
		start = max(pci_reserve_start_pfn, start);
		end = min(pci_reserve_end_pfn, end);
		reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
				BOOTMEM_EXCLUSIVE);
	}
#endif
}
static void __init setup_bootmem_allocator(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i)
		setup_bootmem_allocator_node(i);

	/* Reserve any memory excluded by "memmap" arguments. */
	for (i = 0; i < memmap_nr; ++i) {
		struct memmap_entry *m = &memmap_map[i];
		reserve_bootmem(m->addr, m->size, BOOTMEM_DEFAULT);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		/* Make sure the initrd memory region is not modified. */
		if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
				    BOOTMEM_EXCLUSIVE)) {
			pr_crit("The initrd memory region has been polluted. Disabling it.\n");
			initrd_start = 0;
			initrd_end = 0;
		} else {
			/*
			 * Translate initrd_start & initrd_end from PA to VA for
			 * future access.
			 */
			initrd_start += PAGE_OFFSET;
			initrd_end += PAGE_OFFSET;
		}
	}
#endif

#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start, resource_size(&crashk_res),
				BOOTMEM_DEFAULT);
#endif
}
void *__init alloc_remap(int nid, unsigned long size)
{
	int pages = node_end_pfn[nid] - node_start_pfn[nid];
	void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
	BUG_ON(size != pages * sizeof(struct page));
	memset(map, 0, size);
	return map;
}
static int __init percpu_size(void)
{
	int size = __per_cpu_end - __per_cpu_start;
	size += PERCPU_MODULE_RESERVE;
	size += PERCPU_DYNAMIC_EARLY_SIZE;
	if (size < PCPU_MIN_UNIT_SIZE)
		size = PCPU_MIN_UNIT_SIZE;
	size = roundup(size, PAGE_SIZE);

	/* In several places we assume the per-cpu data fits on a huge page. */
	BUG_ON(kdata_huge && size > HPAGE_SIZE);
	return size;
}
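/*
 * Rough sizing sketch (the numbers are illustrative, not taken from a
 * real build): with ~256KB of static per-cpu data plus an 8KB
 * PERCPU_MODULE_RESERVE and a 12KB PERCPU_DYNAMIC_EARLY_SIZE, each
 * per-cpu unit rounds up to 276KB, which zone_sizes_init() below then
 * charges against the owning node via node_percpu[].
 */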
static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
	int size = percpu_size();
	int num_cpus = smp_height * smp_width;
	const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));
	int i;

	for (i = 0; i < num_cpus; ++i)
		node_percpu[cpu_to_node(i)] += size;

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
#ifdef CONFIG_HIGHMEM
		unsigned long lowmem_end = node_lowmem_end_pfn[i];
#else
		unsigned long lowmem_end = end;
#endif
		int memmap_size = (end - start) * sizeof(struct page);
		node_free_pfn[i] = start;

		/*
		 * Set aside pages for per-cpu data and the mem_map array.
		 *
		 * Since the per-cpu data requires special homecaching,
		 * if we are in kdata_huge mode, we put it at the end of
		 * the lowmem region.  If we're not in kdata_huge mode,
		 * we take the per-cpu pages from the bottom of the
		 * controller, since that avoids fragmenting a huge page
		 * that users might want.  We always take the memmap
		 * from the bottom of the controller, since with
		 * kdata_huge that lets it be under a huge TLB entry.
		 *
		 * If the user has requested isolnodes for a controller,
		 * though, there'll be no lowmem, so we just alloc_bootmem
		 * the memmap.  There will be no percpu memory either.
		 */
		if (i != 0 && node_isset(i, isolnodes)) {
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(0, memmap_size, 0);
			BUG_ON(node_percpu[i] != 0);
		} else if (node_has_bootmem(start)) {
			unsigned long goal = 0;
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(i, memmap_size, 0);
			if (kdata_huge)
				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
			if (node_percpu[i])
				node_percpu_pfn[i] =
					alloc_bootmem_pfn(i, node_percpu[i],
							  goal);
		} else {
			/* In non-bootmem zones, just reserve some pages. */
			node_memmap_pfn[i] = node_free_pfn[i];
			node_free_pfn[i] += PFN_UP(memmap_size);
			if (!kdata_huge) {
				node_percpu_pfn[i] = node_free_pfn[i];
				node_free_pfn[i] += PFN_UP(node_percpu[i]);
			} else {
				node_percpu_pfn[i] =
					lowmem_end - PFN_UP(node_percpu[i]);
			}
		}

#ifdef CONFIG_HIGHMEM
		if (start > lowmem_end) {
			zones_size[ZONE_NORMAL] = 0;
			zones_size[ZONE_HIGHMEM] = end - start;
		} else {
			zones_size[ZONE_NORMAL] = lowmem_end - start;
			zones_size[ZONE_HIGHMEM] = end - lowmem_end;
		}
#else
		zones_size[ZONE_NORMAL] = end - start;
#endif

		if (start < dma_end) {
			zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
						   dma_end - start);
			zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
		} else {
			zones_size[ZONE_DMA] = 0;
		}

		/* Take zone metadata from controller 0 if we're isolnode. */
		if (node_isset(i, isolnodes))
			NODE_DATA(i)->bdata = &bootmem_node_data[0];

		free_area_init_node(i, zones_size, start, NULL);
		printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
		       PFN_UP(node_percpu[i]));

		/* Track the type of memory on each node */
		if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
			node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
		if (end != start)
			node_set_state(i, N_HIGH_MEMORY);
#endif
	}
}
#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __ro_after_init;
EXPORT_SYMBOL(node_2_cpu_mask);

/* which node each logical CPU is on */
char cpu_2_node[NR_CPUS] __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);
/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus)
{
	if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
		return -1;
	else
		return cpu_to_node(cpu);
}
/* Return number of immediately-adjacent tiles sharing the same NUMA node. */
static int __init node_neighbors(int node, int cpu,
				 struct cpumask *unbound_cpus)
{
	int neighbors = 0;
	int w = smp_width;
	int h = smp_height;
	int x = cpu % w;
	int y = cpu / w;

	if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
		++neighbors;
	if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
		++neighbors;
	if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
		++neighbors;
	if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
		++neighbors;
	return neighbors;
}
static void __init setup_numa_mapping(void)
{
	u8 distance[MAX_NUMNODES][NR_CPUS];
	HV_Coord coord;
	int cpu, node, cpus, i, x, y;
	int num_nodes = num_online_nodes();
	struct cpumask unbound_cpus;
	nodemask_t default_nodes;

	cpumask_clear(&unbound_cpus);

	/* Get set of nodes we will use for defaults */
	nodes_andnot(default_nodes, node_online_map, isolnodes);
	if (nodes_empty(default_nodes)) {
		BUG_ON(!node_isset(0, node_online_map));
		pr_err("Forcing NUMA node zero available as a default node\n");
		node_set(0, default_nodes);
	}

	/* Populate the distance[] array */
	memset(distance, -1, sizeof(distance));
	cpu = 0;
	for (coord.y = 0; coord.y < smp_height; ++coord.y) {
		for (coord.x = 0; coord.x < smp_width;
		     ++coord.x, ++cpu) {
			BUG_ON(cpu >= nr_cpu_ids);
			if (!cpu_possible(cpu)) {
				cpu_2_node[cpu] = -1;
				continue;
			}
			for_each_node_mask(node, default_nodes) {
				HV_MemoryControllerInfo info =
					hv_inquire_memory_controller(
						coord, node_controller[node]);
				distance[node][cpu] =
					ABS(info.coord.x) + ABS(info.coord.y);
			}
			cpumask_set_cpu(cpu, &unbound_cpus);
		}
	}
	cpus = cpu;

	/*
	 * Round-robin through the NUMA nodes until all the cpus are
	 * assigned.  We could be more clever here (e.g. create four
	 * sorted linked lists on the same set of cpu nodes, and pull
	 * off them in round-robin sequence, removing from all four
	 * lists each time) but given the relatively small numbers
	 * involved, O(n^2) seem OK for a one-time cost.
	 */
	node = first_node(default_nodes);
	while (!cpumask_empty(&unbound_cpus)) {
		int best_cpu = -1;
		int best_distance = INT_MAX;
		for (cpu = 0; cpu < cpus; ++cpu) {
			if (cpumask_test_cpu(cpu, &unbound_cpus)) {
				/*
				 * Compute metric, which is how much
				 * closer the cpu is to this memory
				 * controller than the others, shifted
				 * up, and then the number of
				 * neighbors already in the node as an
				 * epsilon adjustment to try to keep
				 * the cpus close to each other.
				 */
				int d = distance[node][cpu] * num_nodes;
				for_each_node_mask(i, default_nodes) {
					d -= distance[i][cpu];
				}
				d *= 8;  /* allow space for epsilon */
				d -= node_neighbors(node, cpu, &unbound_cpus);
				if (d < best_distance) {
					best_cpu = cpu;
					best_distance = d;
				}
			}
		}
		BUG_ON(best_cpu < 0);
		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
		cpu_2_node[best_cpu] = node;
		cpumask_clear_cpu(best_cpu, &unbound_cpus);
		node = next_node_in(node, default_nodes);
	}

	/* Print out node assignments and set defaults for disabled cpus */
	cpu = 0;
	for (y = 0; y < smp_height; ++y) {
		printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
		for (x = 0; x < smp_width; ++x, ++cpu) {
			if (cpu_to_node(cpu) < 0) {
				pr_cont(" -");
				cpu_2_node[cpu] = first_node(default_nodes);
			} else {
				pr_cont(" %d", cpu_to_node(cpu));
			}
		}
		pr_cont("\n");
	}
}
static struct cpu cpu_devices[NR_CPUS];

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for (i = 0; i < smp_height * smp_width; ++i)
		register_cpu(&cpu_devices[i], i);

	return 0;
}

subsys_initcall(topology_init);
#else /* !CONFIG_NUMA */

#define setup_numa_mapping() do { } while (0)

#endif /* CONFIG_NUMA */
/*
 * Initialize hugepage support on this cpu.  We do this on all cores
 * early in boot: before argument parsing for the boot cpu, and after
 * argument parsing but before the init functions run on the secondaries.
 * So the values we set up here in the hypervisor may be overridden on
 * the boot cpu as arguments are parsed.
 */
static void init_super_pages(void)
{
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	int i;
	for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
		hv_set_pte_super_shift(i, huge_shift[i]);
#endif
}
/**
 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
 * @boot: Is this the boot cpu?
 *
 * Called from setup_arch() on the boot cpu, or online_secondary().
 */
void setup_cpu(int boot)
{
	/* The boot cpu sets up its permanent mappings much earlier. */
	if (!boot)
		store_permanent_mappings();

	/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
	arch_local_irq_unmask(INT_DMATLB_MISS);
	arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#ifdef __tilegx__
	arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif

	/*
	 * Allow user access to many generic SPRs, like the cycle
	 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
	 */
	__insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
	/* Static network is not restricted. */
	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif

	/*
	 * Set the MPL for interrupt control 0 & 1 to the corresponding
	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
	 * SPRs, as well as the interrupt mask.
	 */
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);

	/* Initialize IRQ support for this cpu. */
	setup_irq_regs();

#ifdef CONFIG_HARDWALL
	/* Reset the network state on this cpu. */
	reset_network_state();
#endif

	init_super_pages();
}
#ifdef CONFIG_BLK_DEV_INITRD

static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs";

static int __init setup_initramfs_file(char *str)
{
	if (str == NULL)
		return -EINVAL;
	strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
	set_initramfs_file = 1;

	return 0;
}
early_param("initramfs_file", setup_initramfs_file);
/*
 * We look for a file called "initramfs" in the hvfs.  If there is one, we
 * allocate some memory for it and it will be unpacked to the initramfs.
 * If it's compressed, the initd code will uncompress it first.
 */
static void __init load_hv_initrd(void)
{
	HV_FS_StatInfo stat;
	int fd, rc;
	void *initrd;

	/* If initrd has already been set, skip initramfs file in hvfs. */
	if (initrd_start)
		return;

	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
	if (fd == HV_ENOENT) {
		if (set_initramfs_file) {
			pr_warn("No such hvfs initramfs file '%s'\n",
				initramfs_file);
			return;
		} else {
			/* Try old backwards-compatible name. */
			fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
			if (fd == HV_ENOENT)
				return;
		}
	}
	stat = hv_fs_fstat(fd);
	BUG_ON(stat.size < 0);
	if (stat.flags & HV_FS_ISDIR) {
		pr_warn("Ignoring hvfs file '%s': it's a directory\n",
			initramfs_file);
		return;
	}
	initrd = alloc_bootmem_pages(stat.size);
	rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
	if (rc != stat.size) {
		pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
		       stat.size, initramfs_file, rc);
		free_initrd_mem((unsigned long) initrd, stat.size);
		return;
	}
	initrd_start = (unsigned long) initrd;
	initrd_end = initrd_start + stat.size;
}
void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
	free_bootmem_late(__pa(begin), end - begin);
}
static int __init setup_initrd(char *str)
{
	char *endp;
	unsigned long initrd_size;

	initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
	if (initrd_size == 0 || *endp != '@')
		return -EINVAL;

	initrd_start = simple_strtoul(endp+1, &endp, 0);
	if (initrd_start == 0)
		return -EINVAL;

	initrd_end = initrd_start + initrd_size;

	return 0;
}
early_param("initrd", setup_initrd);
#else
static inline void load_hv_initrd(void) {}
#endif /* CONFIG_BLK_DEV_INITRD */
static void __init validate_hv(void)
{
	/*
	 * It may already be too late, but let's check our built-in
	 * configuration against what the hypervisor is providing.
	 */
	unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
	int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
	int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
	HV_ASIDRange asid_range;

#ifndef CONFIG_SMP
	HV_Topology topology = hv_inquire_topology();
	BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
	if (topology.width != 1 || topology.height != 1) {
		pr_warn("Warning: booting UP kernel on %dx%d grid; will ignore all but first tile\n",
			topology.width, topology.height);
	}
#endif

	if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
		early_panic("Hypervisor glue size %ld is too big!\n",
			    glue_size);
	if (hv_page_size != PAGE_SIZE)
		early_panic("Hypervisor page size %#x != our %#lx\n",
			    hv_page_size, PAGE_SIZE);
	if (hv_hpage_size != HPAGE_SIZE)
		early_panic("Hypervisor huge page size %#x != our %#lx\n",
			    hv_hpage_size, HPAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * Some hypervisor APIs take a pointer to a bitmap array
	 * whose size is at least the number of cpus on the chip.
	 * We use a struct cpumask for this, so it must be big enough.
	 */
	if ((smp_height * smp_width) > nr_cpu_ids)
		early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %u\n",
			    smp_height, smp_width, nr_cpu_ids);
#endif

	/*
	 * Check that we're using allowed ASIDs, and initialize the
	 * various asid variables to their appropriate initial states.
	 */
	asid_range = hv_inquire_asid(0);
	min_asid = asid_range.start;
	__this_cpu_write(current_asid, min_asid);
	max_asid = asid_range.start + asid_range.size - 1;

	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
		       sizeof(chip_model)) < 0) {
		pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
		strlcpy(chip_model, "unknown", sizeof(chip_model));
	}
}
static void __init validate_va(void)
{
#ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
	/*
	 * Similarly, make sure we're only using allowed VAs.
	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
	 * and 0 .. KERNEL_HIGH_VADDR.
	 * In addition, make sure we CAN'T use the end of memory, since
	 * we use the last chunk of each pgd for the pgd_list.
	 */
	int i, user_kernel_ok = 0;
	unsigned long max_va = 0;
	unsigned long list_va =
		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);

	for (i = 0; ; ++i) {
		HV_VirtAddrRange range = hv_inquire_virtual(i);
		if (range.size == 0)
			break;
		if (range.start <= MEM_USER_INTRPT &&
		    range.start + range.size >= MEM_HV_START)
			user_kernel_ok = 1;
		if (range.start == 0)
			max_va = range.size;
		BUG_ON(range.start + range.size > list_va);
	}
	if (!user_kernel_ok)
		early_panic("Hypervisor not configured for user/kernel VAs\n");
	if (max_va == 0)
		early_panic("Hypervisor not configured for low VAs\n");
	if (max_va < KERNEL_HIGH_VADDR)
		early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
			    max_va, KERNEL_HIGH_VADDR);

	/* Kernel PCs must have their high bit set; see intvec.S. */
	if ((long)VMALLOC_START >= 0)
		early_panic("Linux VMALLOC region below the 2GB line (%#lx)!\n"
			    "Reconfigure the kernel with smaller VMALLOC_RESERVE\n",
			    VMALLOC_START);
#endif
}
/*
 * cpu_lotar_map lists all the cpus that are valid for the supervisor
 * to cache data on at a page level, i.e. what cpus can be placed in
 * the LOTAR field of a PTE.  It is equivalent to the set of possible
 * cpus plus any other cpus that are willing to share their cache.
 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
 */
struct cpumask __ro_after_init cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);

/*
 * hash_for_home_map lists all the tiles that hash-for-home data
 * will be cached on.  Note that this may include tiles that are not
 * valid for this supervisor to use otherwise (e.g. if a hypervisor
 * device is being shared between multiple supervisors).
 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
 */
struct cpumask hash_for_home_map;
EXPORT_SYMBOL(hash_for_home_map);

/*
 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
 * flush on our behalf.  It is set to cpu_possible_mask OR'ed with
 * hash_for_home_map, and it is what should be passed to
 * hv_flush_remote() to flush all caches.  Note that if there are
 * dedicated hypervisor driver tiles that have authorized use of their
 * cache, those tiles will only appear in cpu_lotar_map, NOT in
 * cpu_cacheable_map, as they are a special case.
 */
struct cpumask __ro_after_init cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);
static __initdata struct cpumask disabled_map;

static int __init disabled_cpus(char *str)
{
	int boot_cpu = smp_processor_id();

	if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
		return -EINVAL;
	if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
		pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
		cpumask_clear_cpu(boot_cpu, &disabled_map);
	}
	return 0;
}

early_param("disabled_cpus", disabled_cpus);
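/*
 * Usage sketch: "disabled_cpus=2-5,7" keeps those tiles out of
 * cpu_possible_mask; the list is parsed with cpulist_parse_crop() and
 * the boot cpu is silently dropped from the set if it was included.
 */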
void __init print_disabled_cpus(void)
{
	if (!cpumask_empty(&disabled_map))
		pr_info("CPUs not available for Linux: %*pbl\n",
			cpumask_pr_args(&disabled_map));
}
static void __init setup_cpu_maps(void)
{
	struct cpumask hv_disabled_map, cpu_possible_init;
	int boot_cpu = smp_processor_id();
	int cpus, i, rc;

	/* Learn which cpus are allowed by the hypervisor. */
	rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
			      (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
			      sizeof(cpu_cacheable_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
	if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
		early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);

	/* Compute the cpus disabled by the hvconfig file. */
	cpumask_complement(&hv_disabled_map, &cpu_possible_init);

	/* Include them with the cpus disabled by "disabled_cpus". */
	cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);

	/*
	 * Disable every cpu after "setup_max_cpus".  But don't mark
	 * as disabled the cpus that are outside of our initial rectangle,
	 * since that turns out to be confusing.
	 */
	cpus = 1;			  /* this cpu */
	cpumask_set_cpu(boot_cpu, &disabled_map);   /* ignore this cpu */
	for (i = 0; cpus < setup_max_cpus; ++i)
		if (!cpumask_test_cpu(i, &disabled_map))
			++cpus;
	for (; i < smp_height * smp_width; ++i)
		cpumask_set_cpu(i, &disabled_map);
	cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
	for (i = smp_height * smp_width; i < NR_CPUS; ++i)
		cpumask_clear_cpu(i, &disabled_map);

	/*
	 * Setup cpu_possible map as every cpu allocated to us, minus
	 * the results of any "disabled_cpus" settings.
	 */
	cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
	init_cpu_possible(&cpu_possible_init);

	/* Learn which cpus are valid for LOTAR caching. */
	rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
			      sizeof(cpu_lotar_map));
	if (rc < 0) {
		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
		cpu_lotar_map = *cpu_possible_mask;
	}

	/* Retrieve set of CPUs used for hash-for-home caching */
	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
			      (HV_VirtAddr) hash_for_home_map.bits,
			      sizeof(hash_for_home_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
}
static int __init dataplane(char *str)
{
	pr_warn("WARNING: dataplane support disabled in this kernel\n");
	return 0;
}

early_param("dataplane", dataplane);
#ifdef CONFIG_NO_HZ_FULL
/* Warn if hypervisor shared cpus are marked as nohz_full. */
static int __init check_nohz_full_cpus(void)
{
	struct cpumask shared;
	int cpu;

	if (hv_inquire_tiles(HV_INQ_TILES_SHARED,
			     (HV_VirtAddr) shared.bits, sizeof(shared)) < 0) {
		pr_warn("WARNING: No support for inquiring hv shared tiles\n");
		return 0;
	}
	for_each_cpu(cpu, &shared) {
		if (tick_nohz_full_cpu(cpu))
			pr_warn("WARNING: nohz_full cpu %d receives hypervisor interrupts!\n",
				cpu);
	}
	return 0;
}
arch_initcall(check_nohz_full_cpus);
#endif
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
void __init setup_arch(char **cmdline_p)
{
	int len;

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	len = hv_get_command_line((HV_VirtAddr) boot_command_line,
				  COMMAND_LINE_SIZE);
	if (boot_command_line[0])
		pr_warn("WARNING: ignoring dynamic command line \"%s\"\n",
			boot_command_line);
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	char *hv_cmdline;
#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
					  COMMAND_LINE_SIZE);
		if (builtin_len < COMMAND_LINE_SIZE-1)
			boot_command_line[builtin_len++] = ' ';
		hv_cmdline = &boot_command_line[builtin_len];
		len = COMMAND_LINE_SIZE - builtin_len;
	} else
#endif
	{
		hv_cmdline = boot_command_line;
		len = COMMAND_LINE_SIZE;
	}
	len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
	if (len < 0 || len > COMMAND_LINE_SIZE)
		early_panic("hv_get_command_line failed: %d\n", len);
#endif

	*cmdline_p = boot_command_line;

	/* Set disabled_map and setup_max_cpus very early */
	parse_early_param();

	/* Make sure the kernel is compatible with the hypervisor. */
	validate_hv();
	validate_va();

	setup_cpu_maps();

#if defined(CONFIG_PCI) && !defined(__tilegx__)
	/*
	 * Initialize the PCI structures.  This is done before memory
	 * setup so that we know whether or not a pci_reserve region
	 * is necessary.
	 */
	if (tile_pci_init() == 0)
		pci_reserve_mb = 0;

	/* PCI systems reserve a region just below 4GB for mapping iomem. */
	pci_reserve_end_pfn  = (1 << (32 - PAGE_SHIFT));
	pci_reserve_start_pfn = pci_reserve_end_pfn -
		(pci_reserve_mb << (20 - PAGE_SHIFT));
#endif

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	setup_memory();
	store_permanent_mappings();
	setup_bootmem_allocator();

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	paging_init();
	setup_numa_mapping();
	zone_sizes_init();
	set_page_homes();
	setup_cpu(1);
	setup_clock();
	load_hv_initrd();
}
/*
 * Set up per-cpu memory.
 */

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
EXPORT_SYMBOL(__per_cpu_offset);

static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };
/*
 * As the percpu code allocates pages, we return the pages from the
 * end of the node for the specified cpu.
 */
static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	int nid = cpu_to_node(cpu);
	unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];

	BUG_ON(size % PAGE_SIZE != 0);
	pfn_offset[nid] += size / PAGE_SIZE;
	BUG_ON(node_percpu[nid] < size);
	node_percpu[nid] -= size;
	if (percpu_pfn[cpu] == 0)
		percpu_pfn[cpu] = pfn;
	return pfn_to_kaddr(pfn);
}
/*
 * Pages reserved for percpu memory are not freeable, and in any case we are
 * on a short path to panic() in setup_per_cpu_area() at this point anyway.
 */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
}
/*
 * Set up vmalloc page tables using bootmem for the percpu code.
 */
static void __init pcpu_fc_populate_pte(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(pgd_addr_invalid(addr));
	if (addr < VMALLOC_START || addr >= VMALLOC_END)
		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
		      addr, VMALLOC_START, VMALLOC_END);

	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	if (pmd_present(*pmd)) {
		BUG_ON(pmd_huge_page(*pmd));
	} else {
		pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
				      HV_PAGE_TABLE_ALIGN, 0);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}
}
void __init setup_per_cpu_areas(void)
{
	struct page *pg;
	unsigned long delta, pfn, lowmem_va;
	unsigned long size = percpu_size();
	char *ptr;
	int rc, cpu, i;

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
				   pcpu_fc_free, pcpu_fc_populate_pte);
	if (rc < 0)
		panic("Cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

		/* finv the copy out of cache so we can change homecache */
		ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
		__finv_buffer(ptr, size);
		pfn = percpu_pfn[cpu];

		/* Rewrite the page tables to cache on that cpu */
		pg = pfn_to_page(pfn);
		for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {

			/* Update the vmalloc mapping and page home. */
			unsigned long addr = (unsigned long)ptr + i;
			pte_t *ptep = virt_to_kpte(addr);
			pte_t pte = *ptep;
			BUG_ON(pfn != pte_pfn(pte));
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
			pte = set_remote_cache_cpu(pte, cpu);
			set_pte_at(&init_mm, addr, ptep, pte);

			/* Update the lowmem mapping for consistency. */
			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
			ptep = virt_to_kpte(lowmem_va);
			if (pte_huge(*ptep)) {
				printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
				       lowmem_va);
				shatter_pmd((pmd_t *)ptep);
				ptep = virt_to_kpte(lowmem_va);
				BUG_ON(pte_huge(*ptep));
			}
			BUG_ON(pfn != pte_pfn(*ptep));
			set_pte_at(&init_mm, lowmem_va, ptep, pte);
		}
	}

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);

	/* Make sure the finv's have completed. */
	mb_incoherent();

	/* Flush the TLB so we reference it properly from here on out. */
	local_flush_tlb_all();
}
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
/*
 * On Pro, we reserve all resources above 4GB so that PCI won't try to put
 * mappings above 4GB.
 */
#if defined(CONFIG_PCI) && !defined(__tilegx__)
static struct resource* __init
insert_non_bus_resource(void)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return NULL;
	res->name = "Non-Bus Physical Address Space";
	res->start = (1ULL << 32);
	res->end = -1LL;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}
#endif
static struct resource* __init
insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return NULL;
	res->start = start_pfn << PAGE_SHIFT;
	res->end = (end_pfn << PAGE_SHIFT) - 1;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (reserved) {
		res->name = "Reserved";
	} else {
		res->name = "System RAM";
		res->flags |= IORESOURCE_SYSRAM;
	}
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}
/*
 * Request address space for all standard resources
 *
 * If the system includes PCI root complex drivers, we need to create
 * a window just below 4GB where PCI BARs can be mapped.
 */
static int __init request_standard_resources(void)
{
	int i;
	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };

#if defined(CONFIG_PCI) && !defined(__tilegx__)
	insert_non_bus_resource();
#endif

	for_each_online_node(i) {
		u64 start_pfn = node_start_pfn[i];
		u64 end_pfn = node_end_pfn[i];

#if defined(CONFIG_PCI) && !defined(__tilegx__)
		if (start_pfn <= pci_reserve_start_pfn &&
		    end_pfn > pci_reserve_start_pfn) {
			if (end_pfn > pci_reserve_end_pfn)
				insert_ram_resource(pci_reserve_end_pfn,
						    end_pfn, 0);
			end_pfn = pci_reserve_start_pfn;
		}
#endif
		insert_ram_resource(start_pfn, end_pfn, 0);
	}

	code_resource.start = __pa(_text - CODE_DELTA);
	code_resource.end = __pa(_etext - CODE_DELTA)-1;
	data_resource.start = __pa(_sdata);
	data_resource.end = __pa(_end)-1;

	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);

	/* Mark any "memmap" regions busy for the resource manager. */
	for (i = 0; i < memmap_nr; ++i) {
		struct memmap_entry *m = &memmap_map[i];
		insert_ram_resource(PFN_DOWN(m->addr),
				    PFN_UP(m->addr + m->size - 1), 1);
	}

#ifdef CONFIG_KEXEC
	insert_resource(&iomem_resource, &crashk_res);
#endif

	return 0;
}

subsys_initcall(request_standard_resources);