// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

pg_data_t *pgdat_list[MAX_NUMNODES];
/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)						\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +				\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
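
/*
 * Illustration (values purely hypothetical): NODEDATA_ALIGN() first rounds
 * the address up to a 1MB boundary, then adds node * PERCPU_PAGE_SIZE,
 * masked so the added stride stays below MAX_NODE_ALIGN_OFFSET (32MB).
 * E.g. for addr = 0x4080000 the first term yields 0x4100000, and node 2
 * lands two per-cpu pages further on, so the per-node areas of different
 * nodes start at different cache offsets.
 */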
/**
 * build_node_maps - callback to setup mem_data structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Detect extents of each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long spfn, epfn, end = start + len;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;

	if (!mem_data[node].min_pfn) {
		mem_data[node].min_pfn = spfn;
		mem_data[node].max_pfn = epfn;
	} else {
		mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
		mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
	}

	return 0;
}
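
/*
 * Because spfn/epfn are rounded with GRANULEROUNDDOWN()/GRANULEROUNDUP(),
 * a node's [min_pfn, max_pfn) range always covers whole granules; pages in
 * a granule that don't physically exist are simply backed by the virtual
 * memmap, as noted above.
 */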
/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for_each_possible_early_cpu(cpu)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}
/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}
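
/*
 * The terms summed above mirror, in order, the layout that fill_pernode()
 * carves out of this region: the per-cpu pages, the node-strided cache-line
 * pad, the node's pg_data_t, its ia64_node_data and one further
 * pg_data_t-sized slot (see the diagram above find_pernode_space()).
 */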
/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for_each_possible_early_cpu(cpu) {
		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;

		if (node != node_cpuid[cpu].nid)
			continue;

		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
			__per_cpu_start;

		/*
		 * percpu area for cpu0 is moved from the __init area
		 * which is setup by head.S and used till this point.
		 * Update ar.k3.  This move ensures that the percpu
		 * area for cpu0 is on the correct node and its
		 * virtual address isn't insanely far from other
		 * percpu areas which is important for congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA,
				    (unsigned long)cpu_data -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
#endif
	return cpu_data;
}
#ifdef CONFIG_SMP
/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *uninitialized_var(gi);
	unsigned int *cpu_map;
	void *base;
	unsigned long base_offset;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int node, prev_node, unit, nr_units;

	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	cpu_map = ai->groups[0].cpu_map;

	/* determine base */
	base = (void *)ULONG_MAX;
	for_each_possible_cpu(cpu)
		base = min(base,
			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
	base_offset = (void *)__per_cpu_start - base;

	/* build cpu_map, units are grouped by node */
	unit = 0;
	for_each_node(node)
		for_each_possible_cpu(cpu)
			if (node == node_cpuid[cpu].nid)
				cpu_map[unit++] = cpu;
	nr_units = unit;

	/* set basic parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size   = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size      = dyn_size;
	ai->unit_size     = PERCPU_PAGE_SIZE;
	ai->atom_size     = PAGE_SIZE;
	ai->alloc_size    = PERCPU_PAGE_SIZE;

	/*
	 * CPUs are put into groups according to node.  Walk cpu_map
	 * and create new groups at node boundaries.
	 */
	prev_node = NUMA_NO_NODE;
	ai->nr_groups = 0;
	for (unit = 0; unit < nr_units; unit++) {
		cpu = cpu_map[unit];
		node = node_cpuid[cpu].nid;

		if (node == prev_node) {
			gi->nr_units++;
			continue;
		}
		prev_node = node;

		gi = &ai->groups[ai->nr_groups++];
		gi->nr_units    = 1;
		gi->base_offset = __per_cpu_offset[cpu] + base_offset;
		gi->cpu_map     = &cpu_map[unit];
	}

	pcpu_setup_first_chunk(ai, base);
	pcpu_free_alloc_info(ai);
}
#endif
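
/*
 * From here on the generic percpu allocator works with the layout described
 * above: one group per node, one PERCPU_PAGE_SIZE unit per possible cpu,
 * with dyn_size bytes of each unit left over for dynamic percpu allocations.
 */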
/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}
/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long spfn, epfn;
	unsigned long pernodesize = 0, pernode;

	spfn = start >> PAGE_SHIFT;
	epfn = (start + len) >> PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}
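
/*
 * Note that build_node_maps() and find_pernode_space() are not called
 * directly: find_memory() hands them to efi_memmap_walk(), and each range
 * reaches them through call_pernode_memory() below, which splits it at node
 * boundaries first.
 */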
/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size;
	int node;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		memblock_reserve(base, size);
	}
}
static void __meminit scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here.
	 * node_online_map is not set for hot-added nodes at this time,
	 * because we are halfway through initialization of the new node's
	 * structures.  If for_each_online_node() were used, a new node's
	 * pg_data_ptrs would not be initialized.  Instead, pgdat_list[]
	 * is checked.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}
/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for_each_possible_early_cpu(cpu) {
		node = node_cpuid[cpu].nid;
		per_cpu(ia64_cpu_info, cpu).node_data =
			mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&ia64_cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}
/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node, but fall back to any other node when the allocation for the
 *	best node fails.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = NUMA_NO_NODE, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == NUMA_NO_NODE)
		bestnode = anynode;

	ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
				     __pa(MAX_DMA_ADDRESS),
				     MEMBLOCK_ALLOC_ACCESSIBLE,
				     bestnode);
	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
		      __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
		      __pa(MAX_DMA_ADDRESS));

	return ptr;
}
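
/*
 * node_distance() is derived from the ACPI SLIT, so the loop above prefers
 * the closest node that still has memory; anynode is only the fallback when
 * no best node could be determined.
 */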
/**
 * memory_less_nodes - allocate and initialize CPU-only nodes' pernode
 *	information.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}
/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();
	efi_memmap_walk(filter_memory, register_active_ranges);

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
	efi_memmap_walk(find_max_min_low_pfn, NULL);

	for_each_online_node(node)
		if (mem_data[node].min_pfn)
			node_clear(node, memory_less_mask);

	reserve_pernode_space();
	memory_less_nodes();

	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}
#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;

	if (first_time) {
		first_time = 0;
		for_each_possible_early_cpu(cpu)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */
/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
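
/*
 * Callbacks passed in @arg (such as build_node_maps() above) therefore only
 * ever see page-aligned ranges that lie entirely within one node, never a
 * raw EFI descriptor straddling a node boundary.
 */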
/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long pfn_offset = 0;
	unsigned long max_pfn = 0;
	int node;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

#ifdef CONFIG_VIRTUAL_MEM_MAP
	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	vmem_map = (struct page *) VMALLOC_END;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

	for_each_online_node(node) {
		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		if (mem_data[node].max_pfn > max_pfn)
			max_pfn = mem_data[node].max_pfn;
	}

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;
	free_area_init_nodes(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
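
/*
 * With CONFIG_VIRTUAL_MEM_MAP a single vmem_map array spans every node's
 * pfn range; each node's node_mem_map just points into it at min_pfn, so
 * holes between nodes cost only virtual address space.
 */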
#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
	scatter_node_data();
}
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
}
#endif