/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/node.h>
#include <linux/cpu.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>
/* <linux/smp.h> doesn't provide this definition. */
#ifndef CONFIG_SMP
#define setup_max_cpus 1
#endif
static inline int ABS(int x) { return x >= 0 ? x : -x; }
/* Chip information */
char chip_model[64] __write_once;

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/* We only create bootmem data on node 0. */
static bootmem_data_t __initdata node0_bdata;
/* Information on the NUMA nodes that we compute early */
unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];

static unsigned long __initdata node_percpu[MAX_NUMNODES];

/* Page frame index of end of lowmem on each controller. */
unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];

/* Number of pages that can be mapped into lowmem. */
static unsigned long __initdata mappable_physpages;
/* Data on which physical memory controller corresponds to which NUMA node */
int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };

/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);

/* Node number as a function of the high PA bits */
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
EXPORT_SYMBOL(highbits_to_node);
static unsigned int __initdata maxmem_pfn = -1U;
static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES-1] = -1U
};

static nodemask_t __initdata isolnodes;

enum { DEFAULT_PCI_RESERVE_MB = 64 };
static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
unsigned long __initdata pci_reserve_start_pfn = -1U;
unsigned long __initdata pci_reserve_end_pfn = -1U;
static int __init setup_maxmem(char *str)
{
	long maxmem_mb;

	if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
	    maxmem_mb == 0)
		return -EINVAL;

	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used to no more than %dMB\n",
		maxmem_pfn >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxmem", setup_maxmem);
static int __init setup_maxnodemem(char *str)
{
	char *endp;
	long maxnodemem_mb, node;

	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
	if (node >= MAX_NUMNODES || *endp != ':' ||
	    strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
		return -EINVAL;

	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxnodemem", setup_maxnodemem);
static int __init setup_isolnodes(char *str)
{
	char buf[MAX_NUMNODES * 5];
	if (str == NULL || nodelist_parse(str, isolnodes) != 0)
		return -EINVAL;

	nodelist_scnprintf(buf, sizeof(buf), isolnodes);
	pr_info("Set isolnodes value to '%s'\n", buf);
	return 0;
}
early_param("isolnodes", setup_isolnodes);
static int __init setup_pci_reserve(char* str)
{
	unsigned long mb;

	if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
	    mb > 3 * 1024)
		return -EINVAL;

	pci_reserve_mb = mb;
	pr_info("Reserving %dMB for PCIE root complex mappings\n",
		pci_reserve_mb);
	return 0;
}
early_param("pci_reserve", setup_pci_reserve);
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;

	/* See validate_va() for more on this test. */
	if ((long)_VMALLOC_START >= 0)
		early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
			    VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);
	return 0;
}
early_param("vmalloc", parse_vmalloc);
#ifdef CONFIG_HIGHMEM
/*
 * Determine for each controller where its lowmem is mapped and how much of
 * it is mapped there.  On controller zero, the first few megabytes are
 * already mapped in as code at MEM_SV_INTRPT, so in principle we could
 * start our data mappings higher up, but for now we don't bother, to avoid
 * additional confusion.
 *
 * One question is whether, on systems with more than 768 Mb and
 * controllers of different sizes, to map in a proportionate amount of
 * each one, or to try to map the same amount from each controller.
 * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
 * respectively, do we map 256MB from each, or do we map 128 MB, 512
 * MB, and 128 MB respectively?)  For now we use a proportionate
 * solution like the latter.
 *
 * The VA/PA mapping demands that we align our decisions at 16 MB
 * boundaries so that we can rapidly convert VA to PA.
 */
static void *__init setup_pa_va_mapping(void)
{
	unsigned long curr_pages = 0;
	unsigned long vaddr = PAGE_OFFSET;
	nodemask_t highonlynodes = isolnodes;
	int i, j;

	memset(pbase_map, -1, sizeof(pbase_map));
	memset(vbase_map, -1, sizeof(vbase_map));

	/* Node zero cannot be isolated for LOWMEM purposes. */
	node_clear(0, highonlynodes);

	/* Count up the number of pages on non-highonlynodes controllers. */
	mappable_physpages = 0;
	for_each_online_node(i) {
		if (!node_isset(i, highonlynodes))
			mappable_physpages +=
				node_end_pfn[i] - node_start_pfn[i];
	}

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
		unsigned long size = end - start;
		unsigned long vaddr_end;

		if (node_isset(i, highonlynodes)) {
			/* Mark this controller as having no lowmem. */
			node_lowmem_end_pfn[i] = start;
			continue;
		}

		curr_pages += size;
		if (mappable_physpages > MAXMEM_PFN) {
			vaddr_end = PAGE_OFFSET +
				(((u64)curr_pages * MAXMEM_PFN /
				  mappable_physpages)
				 << PAGE_SHIFT);
		} else {
			vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
		}
		for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
			unsigned long this_pfn =
				start + (j << HUGETLB_PAGE_ORDER);
			pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
			if (vbase_map[__pfn_to_highbits(this_pfn)] ==
			    (void *)-1)
				vbase_map[__pfn_to_highbits(this_pfn)] =
					(void *)(vaddr & HPAGE_MASK);
		}
		node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
		BUG_ON(node_lowmem_end_pfn[i] > end);
	}

	/* Return highest address of any mapped memory. */
	return (void *)vaddr;
}
#endif /* CONFIG_HIGHMEM */
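/*
 * Editor's note (summary of the tables built above, not new behavior):
 * pbase_map[va >> HPAGE_SHIFT] records the first PFN backing the huge page
 * that contains lowmem VA 'va', and vbase_map[__pfn_to_highbits(pfn)]
 * records the lowmem VA at which the first mapped page of that high-PA-bits
 * region lives.  Keeping every decision huge-page aligned is what makes
 * both directions of the VA/PA conversion a single table lookup.
 */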
/*
 * Register our most important memory mappings with the debug stub.
 *
 * This is up to 4 mappings for lowmem, one mapping per memory
 * controller, plus one for our text segment.
 */
static void __cpuinit store_permanent_mappings(void)
{
	int i;

	for_each_online_node(i) {
		HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
		HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
#else
		HV_PhysAddr high_mapped_pa = node_end_pfn[i];
#endif

		unsigned long pages = high_mapped_pa - node_start_pfn[i];
		HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
		hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
	}

	hv_store_mapping((HV_VirtAddr)_stext,
			 (uint32_t)(_einittext - _stext), 0);
}
/*
 * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
 * and node_online_map, doing suitable sanity-checking.
 * Also set min_low_pfn, max_low_pfn, and max_pfn.
 */
static void __init setup_memory(void)
{
	int i, j;
	int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
#ifdef CONFIG_HIGHMEM
	long highmem_pages;
#endif
#ifndef __tilegx__
	int cap;
#endif
#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
	long lowmem_pages = 0;
#endif

	/* We are using a char to hold the cpu_2_node[] mapping */
	BUILD_BUG_ON(MAX_NUMNODES > 127);

	/* Discover the ranges of memory available to us */
	for (i = 0; ; ++i) {
		unsigned long start, size, end, highbits;
		HV_PhysAddrRange range = hv_inquire_physical(i);
		if (range.size == 0)
			break;
#ifdef CONFIG_FLATMEM
		if (i > 0) {
			pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
			       range.size, range.start + range.size);
			continue;
		}
#endif
		if ((unsigned long)range.start) {
			pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		if ((range.start & (HPAGE_SIZE-1)) != 0 ||
		    (range.size & (HPAGE_SIZE-1)) != 0) {
			unsigned long long start_pa = range.start;
			unsigned long long orig_size = range.size;
			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
			range.size -= (range.start - start_pa);
			range.size &= HPAGE_MASK;
			pr_err("Range not hugepage-aligned: %#llx..%#llx:"
			       " now %#llx-%#llx\n",
			       start_pa, start_pa + orig_size,
			       range.start, range.start + range.size);
		}
		highbits = __pa_to_highbits(range.start);
		if (highbits >= NR_PA_HIGHBIT_VALUES) {
			pr_err("PA high bits too high: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		if (highbits_seen[highbits]) {
			pr_err("Range overlaps in high bits: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		highbits_seen[highbits] = 1;
		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
			int max_size = maxnodemem_pfn[i];
			if (max_size > 0) {
				pr_err("Maxnodemem reduced node %d to"
				       " %d pages\n", i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxnodemem disabled node %d\n", i);
				continue;
			}
		}
		if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) {
			int max_size = maxmem_pfn - num_physpages;
			if (max_size > 0) {
				pr_err("Maxmem reduced node %d to %d pages\n",
				       i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxmem disabled node %d\n", i);
				continue;
			}
		}
		if (i >= MAX_NUMNODES) {
			pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
			       i, range.size, range.size + range.start);
			continue;
		}

		start = range.start >> PAGE_SHIFT;
		size = range.size >> PAGE_SHIFT;
		end = start + size;

		if (((HV_PhysAddr)end << PAGE_SHIFT) !=
		    (range.start + range.size)) {
			pr_err("PAs too high to represent: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}

		/*
		 * Blocks that overlap the pci reserved region must
		 * have enough space to hold the maximum percpu data
		 * region at the top of the range.  If there isn't
		 * enough space above the reserved region, just
		 * truncate the node.
		 */
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			unsigned int per_cpu_size =
				__per_cpu_end - __per_cpu_start;
			unsigned int percpu_pages =
				NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
			if (end < pci_reserve_end_pfn + percpu_pages) {
				end = pci_reserve_start_pfn;
				pr_err("PCI mapping region reduced node %d to"
				       " %ld pages\n", i, end - start);
			}
		}

		for (j = __pfn_to_highbits(start);
		     j <= __pfn_to_highbits(end - 1); j++)
			highbits_to_node[j] = i;

		node_start_pfn[i] = start;
		node_end_pfn[i] = end;
		node_controller[i] = range.controller;
		num_physpages += size;
		max_pfn = end;

		/* Mark node as online */
		node_set(i, node_online_map);
		node_set(i, node_possible_map);
	}

#ifndef __tilegx__
	/*
	 * For 4KB pages, mem_map "struct page" data is 1% of the size
	 * of the physical memory, so can be quite big (640 MB for
	 * four 16G zones).  These structures must be mapped in
	 * lowmem, and since we currently cap out at about 768 MB,
	 * it's impractical to try to use this much address space.
	 * For now, arbitrarily cap the amount of physical memory
	 * we're willing to use at 8 million pages (32GB of 4KB pages).
	 */
	cap = 8 * 1024 * 1024;  /* 8 million pages */
	if (num_physpages > cap) {
		int num_nodes = num_online_nodes();
		int cap_each = cap / num_nodes;
		unsigned long dropped_pages = 0;
		for (i = 0; i < num_nodes; ++i) {
			int size = node_end_pfn[i] - node_start_pfn[i];
			if (size > cap_each) {
				dropped_pages += (size - cap_each);
				node_end_pfn[i] = node_start_pfn[i] + cap_each;
			}
		}
		num_physpages -= dropped_pages;
		pr_warning("Only using %ldMB memory;"
			   " ignoring %ldMB.\n",
			   num_physpages >> (20 - PAGE_SHIFT),
			   dropped_pages >> (20 - PAGE_SHIFT));
		pr_warning("Consider using a larger page size.\n");
	}
#endif

	/* Heap starts just above the last loaded address. */
	min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);

#ifdef CONFIG_HIGHMEM
	/* Find where we map lowmem from each controller. */
	high_memory = setup_pa_va_mapping();

	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_lowmem_end_pfn[0];

	lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
		MAXMEM_PFN : mappable_physpages;
	highmem_pages = (long) (num_physpages - lowmem_pages);

	pr_notice("%ldMB HIGHMEM available.\n",
		  pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
	pr_notice("%ldMB LOWMEM available.\n",
		  pages_to_mb(lowmem_pages));
#else
	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_end_pfn[0];

#ifndef __tilegx__
	if (node_end_pfn[0] > MAXMEM_PFN) {
		pr_warning("Only using %ldMB LOWMEM.\n",
			   MAXMEM >> 20);
		pr_warning("Use a HIGHMEM enabled kernel.\n");
		max_low_pfn = MAXMEM_PFN;
		max_pfn = MAXMEM_PFN;
		num_physpages = MAXMEM_PFN;
		node_end_pfn[0] = MAXMEM_PFN;
	} else {
		pr_notice("%ldMB memory available.\n",
			  pages_to_mb(node_end_pfn[0]));
	}
	for (i = 1; i < MAX_NUMNODES; ++i) {
		node_start_pfn[i] = 0;
		node_end_pfn[i] = 0;
	}
	high_memory = __va(node_end_pfn[0]);
#else
	for (i = 0; i < MAX_NUMNODES; ++i) {
		int pages = node_end_pfn[i] - node_start_pfn[i];
		lowmem_pages += pages;
		if (pages)
			high_memory = pfn_to_kaddr(node_end_pfn[i]);
	}
	pr_notice("%ldMB memory available.\n",
		  pages_to_mb(lowmem_pages));
#endif
#endif
}
static void __init setup_bootmem_allocator(void)
{
	unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn;

	/* Provide a node 0 bdata. */
	NODE_DATA(0)->bdata = &node0_bdata;

#ifdef CONFIG_PCI
	/* Don't let boot memory alias the PCI region. */
	last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn);
#else
	last_alloc_pfn = max_low_pfn;
#endif

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 * The first argument says where to put the bitmap, and the
	 * second says where the end of allocatable memory is.
	 */
	bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn);

	/*
	 * Let the bootmem allocator use all the space we've given it
	 * except for its own bitmap.
	 */
	first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size);
	if (first_alloc_pfn >= last_alloc_pfn)
		early_panic("Not enough memory on controller 0 for bootmem\n");

	free_bootmem(PFN_PHYS(first_alloc_pfn),
		     PFN_PHYS(last_alloc_pfn - first_alloc_pfn));

#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1, 0);
#endif
}
void *__init alloc_remap(int nid, unsigned long size)
{
	int pages = node_end_pfn[nid] - node_start_pfn[nid];
	void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
	BUG_ON(size != pages * sizeof(struct page));
	memset(map, 0, size);
	return map;
}
static int __init percpu_size(void)
{
	int size = __per_cpu_end - __per_cpu_start;
	size += PERCPU_MODULE_RESERVE;
	size += PERCPU_DYNAMIC_EARLY_SIZE;
	if (size < PCPU_MIN_UNIT_SIZE)
		size = PCPU_MIN_UNIT_SIZE;
	size = roundup(size, PAGE_SIZE);

	/* In several places we assume the per-cpu data fits on a huge page. */
	BUG_ON(kdata_huge && size > HPAGE_SIZE);
	return size;
}
static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
{
	void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
	unsigned long pfn = kaddr_to_pfn(kva);
	BUG_ON(goal && PFN_PHYS(pfn) != goal);
	return pfn;
}
static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
	int size = percpu_size();
	int num_cpus = smp_height * smp_width;
	int i;

	for (i = 0; i < num_cpus; ++i)
		node_percpu[cpu_to_node(i)] += size;

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
#ifdef CONFIG_HIGHMEM
		unsigned long lowmem_end = node_lowmem_end_pfn[i];
#else
		unsigned long lowmem_end = end;
#endif
		int memmap_size = (end - start) * sizeof(struct page);
		node_free_pfn[i] = start;

		/*
		 * Set aside pages for per-cpu data and the mem_map array.
		 *
		 * Since the per-cpu data requires special homecaching,
		 * if we are in kdata_huge mode, we put it at the end of
		 * the lowmem region.  If we're not in kdata_huge mode,
		 * we take the per-cpu pages from the bottom of the
		 * controller, since that avoids fragmenting a huge page
		 * that users might want.  We always take the memmap
		 * from the bottom of the controller, since with
		 * kdata_huge that lets it be under a huge TLB entry.
		 *
		 * If the user has requested isolnodes for a controller,
		 * though, there'll be no lowmem, so we just alloc_bootmem
		 * the memmap.  There will be no percpu memory either.
		 */
		if (__pfn_to_highbits(start) == 0) {
			/* In low PAs, allocate via bootmem. */
			unsigned long goal = 0;
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(memmap_size, goal);
			if (kdata_huge)
				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
			if (node_percpu[i])
				node_percpu_pfn[i] =
					alloc_bootmem_pfn(node_percpu[i], goal);
		} else if (cpu_isset(i, isolnodes)) {
			node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
			BUG_ON(node_percpu[i] != 0);
		} else {
			/* In high PAs, just reserve some pages. */
			node_memmap_pfn[i] = node_free_pfn[i];
			node_free_pfn[i] += PFN_UP(memmap_size);
			if (!kdata_huge) {
				node_percpu_pfn[i] = node_free_pfn[i];
				node_free_pfn[i] += PFN_UP(node_percpu[i]);
			} else {
				node_percpu_pfn[i] =
					lowmem_end - PFN_UP(node_percpu[i]);
			}
		}

#ifdef CONFIG_HIGHMEM
		if (start > lowmem_end) {
			zones_size[ZONE_NORMAL] = 0;
			zones_size[ZONE_HIGHMEM] = end - start;
		} else {
			zones_size[ZONE_NORMAL] = lowmem_end - start;
			zones_size[ZONE_HIGHMEM] = end - lowmem_end;
		}
#else
		zones_size[ZONE_NORMAL] = end - start;
#endif

		/*
		 * Everyone shares node 0's bootmem allocator, but
		 * we use alloc_remap(), above, to put the actual
		 * struct page array on the individual controllers,
		 * which is most of the data that we actually care about.
		 * We can't place bootmem allocators on the other
		 * controllers since the bootmem allocator can only
		 * operate on 32-bit physical addresses.
		 */
		NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;

		free_area_init_node(i, zones_size, start, NULL);
		printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
		       PFN_UP(node_percpu[i]));

		/* Track the type of memory on each node */
		if (zones_size[ZONE_NORMAL])
			node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
		if (end != start)
			node_set_state(i, N_HIGH_MEMORY);
#endif
	}
}
#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
EXPORT_SYMBOL(node_2_cpu_mask);

/* which node each logical CPU is on */
char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);

/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus)
{
	if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
		return -1;
	else
		return cpu_to_node(cpu);
}

/* Return number of immediately-adjacent tiles sharing the same NUMA node. */
static int __init node_neighbors(int node, int cpu,
				 struct cpumask *unbound_cpus)
{
	int neighbors = 0;
	int w = smp_width;
	int h = smp_height;
	int x = cpu % w;
	int y = cpu / w;
	if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
		++neighbors;
	if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
		++neighbors;
	if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
		++neighbors;
	if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
		++neighbors;
	return neighbors;
}

static void __init setup_numa_mapping(void)
{
	int distance[MAX_NUMNODES][NR_CPUS];
	HV_Coord coord;
	int cpu, node, cpus, i, x, y;
	int num_nodes = num_online_nodes();
	struct cpumask unbound_cpus;
	nodemask_t default_nodes;

	cpumask_clear(&unbound_cpus);

	/* Get set of nodes we will use for defaults */
	nodes_andnot(default_nodes, node_online_map, isolnodes);
	if (nodes_empty(default_nodes)) {
		BUG_ON(!node_isset(0, node_online_map));
		pr_err("Forcing NUMA node zero available as a default node\n");
		node_set(0, default_nodes);
	}

	/* Populate the distance[] array */
	memset(distance, -1, sizeof(distance));
	cpu = 0;
	for (coord.y = 0; coord.y < smp_height; ++coord.y) {
		for (coord.x = 0; coord.x < smp_width;
		     ++coord.x, ++cpu) {
			BUG_ON(cpu >= nr_cpu_ids);
			if (!cpu_possible(cpu)) {
				cpu_2_node[cpu] = -1;
				continue;
			}
			for_each_node_mask(node, default_nodes) {
				HV_MemoryControllerInfo info =
					hv_inquire_memory_controller(
						coord, node_controller[node]);
				distance[node][cpu] =
					ABS(info.coord.x) + ABS(info.coord.y);
			}
			cpumask_set_cpu(cpu, &unbound_cpus);
		}
	}
	cpus = cpu;

	/*
	 * Round-robin through the NUMA nodes until all the cpus are
	 * assigned.  We could be more clever here (e.g. create four
	 * sorted linked lists on the same set of cpu nodes, and pull
	 * off them in round-robin sequence, removing from all four
	 * lists each time) but given the relatively small numbers
	 * involved, O(n^2) seem OK for a one-time cost.
	 */
	node = first_node(default_nodes);
	while (!cpumask_empty(&unbound_cpus)) {
		int best_cpu = -1;
		int best_distance = INT_MAX;
		for (cpu = 0; cpu < cpus; ++cpu) {
			if (cpumask_test_cpu(cpu, &unbound_cpus)) {
				/*
				 * Compute metric, which is how much
				 * closer the cpu is to this memory
				 * controller than the others, shifted
				 * up, and then the number of
				 * neighbors already in the node as an
				 * epsilon adjustment to try to keep
				 * the tiles together.
				 */
				int d = distance[node][cpu] * num_nodes;
				for_each_node_mask(i, default_nodes) {
					if (i != node)
						d -= distance[i][cpu];
				}
				d *= 8;  /* allow space for epsilon */
				d -= node_neighbors(node, cpu, &unbound_cpus);
				if (d < best_distance) {
					best_cpu = cpu;
					best_distance = d;
				}
			}
		}
		BUG_ON(best_cpu < 0);
		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
		cpu_2_node[best_cpu] = node;
		cpumask_clear_cpu(best_cpu, &unbound_cpus);
		node = next_node(node, default_nodes);
		if (node == MAX_NUMNODES)
			node = first_node(default_nodes);
	}

	/* Print out node assignments and set defaults for disabled cpus */
	cpu = 0;
	for (y = 0; y < smp_height; ++y) {
		printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
		for (x = 0; x < smp_width; ++x, ++cpu) {
			if (cpu_to_node(cpu) < 0) {
				pr_cont(" -");
				cpu_2_node[cpu] = first_node(default_nodes);
			} else {
				pr_cont(" %d", cpu_to_node(cpu));
			}
		}
		pr_cont("\n");
	}
}

static struct cpu cpu_devices[NR_CPUS];

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for (i = 0; i < smp_height * smp_width; ++i)
		register_cpu(&cpu_devices[i], i);

	return 0;
}

subsys_initcall(topology_init);

#else /* !CONFIG_NUMA */

#define setup_numa_mapping() do { } while (0)

#endif /* CONFIG_NUMA */
/**
 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
 * @boot: Is this the boot cpu?
 *
 * Called from setup_arch() on the boot cpu, or online_secondary().
 */
void __cpuinit setup_cpu(int boot)
{
	/* The boot cpu sets up its permanent mappings much earlier. */
	if (!boot)
		store_permanent_mappings();

	/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
	arch_local_irq_unmask(INT_DMATLB_MISS);
	arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
	arch_local_irq_unmask(INT_SNITLB_MISS);
#endif
#ifdef __tilegx__
	arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif

	/*
	 * Allow user access to many generic SPRs, like the cycle
	 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
	 */
	__insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
	/* Static network is not restricted. */
	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif
#if CHIP_HAS_SN_PROC()
	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
#endif

	/*
	 * Set the MPL for interrupt control 0 & 1 to the corresponding
	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
	 * SPRs, as well as the interrupt mask.
	 */
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);

	/* Initialize IRQ support for this cpu. */
	setup_irq_regs();

#ifdef CONFIG_HARDWALL
	/* Reset the network state on this cpu. */
	reset_network_state();
#endif
}
#ifdef CONFIG_BLK_DEV_INITRD

static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs.cpio.gz";

static int __init setup_initramfs_file(char *str)
{
	if (str == NULL)
		return -EINVAL;
	strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
	set_initramfs_file = 1;

	return 0;
}
early_param("initramfs_file", setup_initramfs_file);

/*
 * We look for an additional "initramfs.cpio.gz" file in the hvfs.
 * If there is one, we allocate some memory for it and it will be
 * unpacked to the initramfs after any built-in initramfs_data.
 */
static void __init load_hv_initrd(void)
{
	HV_FS_StatInfo stat;
	int fd, rc;
	void *initrd;

	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
	if (fd == HV_ENOENT) {
		if (set_initramfs_file)
			pr_warning("No such hvfs initramfs file '%s'\n",
				   initramfs_file);
		return;
	}
	stat = hv_fs_fstat(fd);
	BUG_ON(stat.size < 0);
	if (stat.flags & HV_FS_ISDIR) {
		pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
			   initramfs_file);
		return;
	}
	initrd = alloc_bootmem_pages(stat.size);
	rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
	if (rc != stat.size) {
		pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
		       stat.size, initramfs_file, rc);
		free_initrd_mem((unsigned long) initrd, stat.size);
		return;
	}
	initrd_start = (unsigned long) initrd;
	initrd_end = initrd_start + stat.size;
}

void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
	free_bootmem(__pa(begin), end - begin);
}

#else
static inline void load_hv_initrd(void) {}
#endif /* CONFIG_BLK_DEV_INITRD */
static void __init validate_hv(void)
{
	/*
	 * It may already be too late, but let's check our built-in
	 * configuration against what the hypervisor is providing.
	 */
	unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
	int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
	int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
	HV_ASIDRange asid_range;

#ifndef CONFIG_SMP
	HV_Topology topology = hv_inquire_topology();
	BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
	if (topology.width != 1 || topology.height != 1) {
		pr_warning("Warning: booting UP kernel on %dx%d grid;"
			   " will ignore all but first tile.\n",
			   topology.width, topology.height);
	}
#endif

	if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
		early_panic("Hypervisor glue size %ld is too big!\n",
			    glue_size);
	if (hv_page_size != PAGE_SIZE)
		early_panic("Hypervisor page size %#x != our %#lx\n",
			    hv_page_size, PAGE_SIZE);
	if (hv_hpage_size != HPAGE_SIZE)
		early_panic("Hypervisor huge page size %#x != our %#lx\n",
			    hv_hpage_size, HPAGE_SIZE);

	/*
	 * Some hypervisor APIs take a pointer to a bitmap array
	 * whose size is at least the number of cpus on the chip.
	 * We use a struct cpumask for this, so it must be big enough.
	 */
	if ((smp_height * smp_width) > nr_cpu_ids)
		early_panic("Hypervisor %d x %d grid too big for Linux"
			    " NR_CPUS %d\n", smp_height, smp_width,
			    nr_cpu_ids);

	/*
	 * Check that we're using allowed ASIDs, and initialize the
	 * various asid variables to their appropriate initial states.
	 */
	asid_range = hv_inquire_asid(0);
	__get_cpu_var(current_asid) = min_asid = asid_range.start;
	max_asid = asid_range.start + asid_range.size - 1;

	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
		       sizeof(chip_model)) < 0) {
		pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
		strlcpy(chip_model, "unknown", sizeof(chip_model));
	}
}
static void __init validate_va(void)
{
#ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
	/*
	 * Similarly, make sure we're only using allowed VAs.
	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
	 * and 0 .. KERNEL_HIGH_VADDR.
	 * In addition, make sure we CAN'T use the end of memory, since
	 * we use the last chunk of each pgd for the pgd_list.
	 */
	int i, user_kernel_ok = 0;
	unsigned long max_va = 0;
	unsigned long list_va =
		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);

	for (i = 0; ; ++i) {
		HV_VirtAddrRange range = hv_inquire_virtual(i);
		if (range.size == 0)
			break;
		if (range.start <= MEM_USER_INTRPT &&
		    range.start + range.size >= MEM_HV_INTRPT)
			user_kernel_ok = 1;
		if (range.start == 0)
			max_va = range.size;
		BUG_ON(range.start + range.size > list_va);
	}
	if (!user_kernel_ok)
		early_panic("Hypervisor not configured for user/kernel VAs\n");
	if (max_va == 0)
		early_panic("Hypervisor not configured for low VAs\n");
	if (max_va < KERNEL_HIGH_VADDR)
		early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
			    max_va, KERNEL_HIGH_VADDR);

	/* Kernel PCs must have their high bit set; see intvec.S. */
	if ((long)VMALLOC_START >= 0)
		early_panic(
			"Linux VMALLOC region below the 2GB line (%#lx)!\n"
			"Reconfigure the kernel with fewer NR_HUGE_VMAPS\n"
			"or smaller VMALLOC_RESERVE.\n",
			VMALLOC_START);
#endif
}
/*
 * cpu_lotar_map lists all the cpus that are valid for the supervisor
 * to cache data on at a page level, i.e. what cpus can be placed in
 * the LOTAR field of a PTE.  It is equivalent to the set of possible
 * cpus plus any other cpus that are willing to share their cache.
 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
 */
struct cpumask __write_once cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);

#if CHIP_HAS_CBOX_HOME_MAP()
/*
 * hash_for_home_map lists all the tiles that hash-for-home data
 * will be cached on.  Note that this may include tiles that are not
 * valid for this supervisor to use otherwise (e.g. if a hypervisor
 * device is being shared between multiple supervisors).
 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
 */
struct cpumask hash_for_home_map;
EXPORT_SYMBOL(hash_for_home_map);
#endif

/*
 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
 * flush on our behalf.  It is set to cpu_possible_map OR'ed with
 * hash_for_home_map, and it is what should be passed to
 * hv_flush_remote() to flush all caches.  Note that if there are
 * dedicated hypervisor driver tiles that have authorized use of their
 * cache, those tiles will only appear in cpu_lotar_map, NOT in
 * cpu_cacheable_map, as they are a special case.
 */
struct cpumask __write_once cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);
static __initdata struct cpumask disabled_map;

static int __init disabled_cpus(char *str)
{
	int boot_cpu = smp_processor_id();

	if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
		return -EINVAL;
	if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
		pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
		cpumask_clear_cpu(boot_cpu, &disabled_map);
	}
	return 0;
}

early_param("disabled_cpus", disabled_cpus);

void __init print_disabled_cpus(void)
{
	if (!cpumask_empty(&disabled_map)) {
		char buf[100];
		cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
		pr_info("CPUs not available for Linux: %s\n", buf);
	}
}
static void __init setup_cpu_maps(void)
{
	struct cpumask hv_disabled_map, cpu_possible_init;
	int boot_cpu = smp_processor_id();
	int cpus, i, rc;

	/* Learn which cpus are allowed by the hypervisor. */
	rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
			      (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
			      sizeof(cpu_cacheable_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
	if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
		early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);

	/* Compute the cpus disabled by the hvconfig file. */
	cpumask_complement(&hv_disabled_map, &cpu_possible_init);

	/* Include them with the cpus disabled by "disabled_cpus". */
	cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);

	/*
	 * Disable every cpu after "setup_max_cpus".  But don't mark
	 * as disabled the cpus that are outside of our initial rectangle,
	 * since that turns out to be confusing.
	 */
	cpus = 1;				    /* this cpu */
	cpumask_set_cpu(boot_cpu, &disabled_map);   /* ignore this cpu */
	for (i = 0; cpus < setup_max_cpus; ++i)
		if (!cpumask_test_cpu(i, &disabled_map))
			++cpus;
	for (; i < smp_height * smp_width; ++i)
		cpumask_set_cpu(i, &disabled_map);
	cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
	for (i = smp_height * smp_width; i < NR_CPUS; ++i)
		cpumask_clear_cpu(i, &disabled_map);

	/*
	 * Setup cpu_possible map as every cpu allocated to us, minus
	 * the results of any "disabled_cpus" settings.
	 */
	cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
	init_cpu_possible(&cpu_possible_init);

	/* Learn which cpus are valid for LOTAR caching. */
	rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
			      sizeof(cpu_lotar_map));
	if (rc < 0) {
		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
		cpu_lotar_map = cpu_possible_map;
	}

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Retrieve set of CPUs used for hash-for-home caching */
	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
			      (HV_VirtAddr) hash_for_home_map.bits,
			      sizeof(hash_for_home_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
	cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
#else
	cpu_cacheable_map = cpu_possible_map;
#endif
}
static int __init dataplane(char *str)
{
	pr_warning("WARNING: dataplane support disabled in this kernel\n");
	return 0;
}

early_param("dataplane", dataplane);
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

void __init setup_arch(char **cmdline_p)
{
	int len;

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	len = hv_get_command_line((HV_VirtAddr) boot_command_line,
				  COMMAND_LINE_SIZE);
	if (boot_command_line[0])
		pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
			   boot_command_line);
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	char *hv_cmdline;
#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
					  COMMAND_LINE_SIZE);
		if (builtin_len < COMMAND_LINE_SIZE-1)
			boot_command_line[builtin_len++] = ' ';
		hv_cmdline = &boot_command_line[builtin_len];
		len = COMMAND_LINE_SIZE - builtin_len;
	} else
#endif
	{
		hv_cmdline = boot_command_line;
		len = COMMAND_LINE_SIZE;
	}
	len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
	if (len < 0 || len > COMMAND_LINE_SIZE)
		early_panic("hv_get_command_line failed: %d\n", len);
#endif

	*cmdline_p = boot_command_line;

	/* Set disabled_map and setup_max_cpus very early */
	parse_early_param();

	/* Make sure the kernel is compatible with the hypervisor. */
	validate_hv();
	validate_va();

	setup_cpu_maps();

	/*
	 * Initialize the PCI structures.  This is done before memory
	 * setup so that we know whether or not a pci_reserve region
	 * is necessary.
	 */
	if (tile_pci_init() == 0)
		pci_reserve_mb = 0;

	/* PCI systems reserve a region just below 4GB for mapping iomem. */
	pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
	pci_reserve_start_pfn = pci_reserve_end_pfn -
		(pci_reserve_mb << (20 - PAGE_SHIFT));

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	setup_memory();
	store_permanent_mappings();
	setup_bootmem_allocator();

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */

	setup_numa_mapping();
	zone_sizes_init();
	setup_cpu(1);
	load_hv_initrd();
}
/*
 * Set up per-cpu memory.
 */

unsigned long __per_cpu_offset[NR_CPUS] __write_once;
EXPORT_SYMBOL(__per_cpu_offset);

static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };

/*
 * As the percpu code allocates pages, we return the pages from the
 * end of the node for the specified cpu.
 */
static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	int nid = cpu_to_node(cpu);
	unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];

	BUG_ON(size % PAGE_SIZE != 0);
	pfn_offset[nid] += size / PAGE_SIZE;
	BUG_ON(node_percpu[nid] < size);
	node_percpu[nid] -= size;
	if (percpu_pfn[cpu] == 0)
		percpu_pfn[cpu] = pfn;
	return pfn_to_kaddr(pfn);
}

/*
 * Pages reserved for percpu memory are not freeable, and in any case we are
 * on a short path to panic() in setup_per_cpu_area() at this point anyway.
 */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
}

/*
 * Set up vmalloc page tables using bootmem for the percpu code.
 */
static void __init pcpu_fc_populate_pte(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(pgd_addr_invalid(addr));
	if (addr < VMALLOC_START || addr >= VMALLOC_END)
		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
		      " try increasing CONFIG_VMALLOC_RESERVE\n",
		      addr, VMALLOC_START, VMALLOC_END);

	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	if (pmd_present(*pmd)) {
		BUG_ON(pmd_huge_page(*pmd));
	} else {
		pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
				      HV_PAGE_TABLE_ALIGN, 0);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}
}

void __init setup_per_cpu_areas(void)
{
	struct page *pg;
	unsigned long delta, pfn, lowmem_va;
	unsigned long size = percpu_size();
	char *ptr;
	int rc, cpu, i;

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
				   pcpu_fc_free, pcpu_fc_populate_pte);
	if (rc < 0)
		panic("Cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

		/* finv the copy out of cache so we can change homecache */
		ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
		__finv_buffer(ptr, size);
		pfn = percpu_pfn[cpu];

		/* Rewrite the page tables to cache on that cpu */
		pg = pfn_to_page(pfn);
		for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {

			/* Update the vmalloc mapping and page home. */
			pte_t *ptep =
				virt_to_pte(NULL, (unsigned long)ptr + i);
			pte_t pte = *ptep;
			BUG_ON(pfn != pte_pfn(pte));
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
			pte = set_remote_cache_cpu(pte, cpu);
			set_pte(ptep, pte);

			/* Update the lowmem mapping for consistency. */
			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
			ptep = virt_to_pte(NULL, lowmem_va);
			if (pte_huge(*ptep)) {
				printk(KERN_DEBUG "early shatter of huge page"
				       " at %#lx\n", lowmem_va);
				shatter_pmd((pmd_t *)ptep);
				ptep = virt_to_pte(NULL, lowmem_va);
				BUG_ON(pte_huge(*ptep));
			}
			BUG_ON(pfn != pte_pfn(*ptep));
			set_pte(ptep, pte);
		}
	}

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);

	/* Make sure the finv's have completed. */
	mb_incoherent();

	/* Flush the TLB so we reference it properly from here on out. */
	local_flush_tlb_all();
}
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

/*
 * We reserve all resources above 4GB so that PCI won't try to put
 * mappings above 4GB; the standard allows that for some devices but
 * the probing code truncates values to 32 bits.
 */
static struct resource* __init
insert_non_bus_resource(void)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	res->name = "Non-Bus Physical Address Space";
	res->start = (1ULL << 32);
	res->end = -1LL;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}

static struct resource* __init
insert_ram_resource(u64 start_pfn, u64 end_pfn)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	res->name = "System RAM";
	res->start = start_pfn << PAGE_SHIFT;
	res->end = (end_pfn << PAGE_SHIFT) - 1;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}

/*
 * Request address space for all standard resources
 *
 * If the system includes PCI root complex drivers, we need to create
 * a window just below 4GB where PCI BARs can be mapped.
 */
static int __init request_standard_resources(void)
{
	int i;
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

	iomem_resource.end = -1LL;
	insert_non_bus_resource();

	for_each_online_node(i) {
		u64 start_pfn = node_start_pfn[i];
		u64 end_pfn = node_end_pfn[i];

		if (start_pfn <= pci_reserve_start_pfn &&
		    end_pfn > pci_reserve_start_pfn) {
			if (end_pfn > pci_reserve_end_pfn)
				insert_ram_resource(pci_reserve_end_pfn,
						    end_pfn);
			end_pfn = pci_reserve_start_pfn;
		}
		insert_ram_resource(start_pfn, end_pfn);
	}

	code_resource.start = __pa(_text - CODE_DELTA);
	code_resource.end = __pa(_etext - CODE_DELTA)-1;
	data_resource.start = __pa(_sdata);
	data_resource.end = __pa(_end)-1;

	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);

#ifdef CONFIG_KEXEC
	insert_resource(&iomem_resource, &crashk_res);
#endif

	return 0;
}

subsys_initcall(request_standard_resources);