// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

pg_data_t *pgdat_list[MAX_NUMNODES];

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +			\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))

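/*
 * For illustration only (assuming, say, a 64KB PERCPU_PAGE_SIZE): node 0's
 * structures start at the next 1MB boundary above 'addr', node 1's at that
 * boundary plus 64KB, node 2's plus 128KB, and so on, with the per-node
 * offset wrapping modulo MAX_NODE_ALIGN_OFFSET (32MB).
 */
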
/**
 * build_node_maps - callback to setup mem_data structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Detect extents of each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long spfn, epfn, end = start + len;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;

	if (!mem_data[node].min_pfn) {
		mem_data[node].min_pfn = spfn;
		mem_data[node].max_pfn = epfn;
	} else {
		mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
		mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
	}

	return 0;
}

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for_each_possible_early_cpu(cpu)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

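	/*
	 * The total below mirrors the layout later carved out by
	 * fill_pernode(): per-cpu areas for this node's cpus, the
	 * node-number stride used to avoid cache aliasing, the node's
	 * pg_data_t and ia64_node_data, plus one more pg_data_t-sized
	 * chunk that fill_pernode() skips over without handing out.
	 */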
	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}

/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for_each_possible_early_cpu(cpu) {
		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;

		if (node != node_cpuid[cpu].nid)
			continue;

		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
			__per_cpu_start;

		/*
		 * percpu area for cpu0 is moved from the __init area
		 * which is setup by head.S and used till this point.
		 * Update ar.k3.  This move ensures that the percpu
		 * area for cpu0 is on the correct node and its
		 * virtual address isn't insanely far from other
		 * percpu areas which is important for congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA,
				    (unsigned long)cpu_data -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
#endif
	return cpu_data;
}

#ifdef CONFIG_SMP
/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *uninitialized_var(gi);
	unsigned int *cpu_map;
	void *base;
	unsigned long base_offset;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int node, prev_node, unit, nr_units, rc;

	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	cpu_map = ai->groups[0].cpu_map;

	/* determine base */
	base = (void *)ULONG_MAX;
	for_each_possible_cpu(cpu)
		base = min(base,
			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
	base_offset = (void *)__per_cpu_start - base;

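	/*
	 * Note: base is the lowest of the already-assigned per-cpu area
	 * addresses, so every group's base_offset computed below is
	 * non-negative relative to it.
	 */
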
	/* build cpu_map, units are grouped by node */
	unit = 0;
	for_each_node(node)
		for_each_possible_cpu(cpu)
			if (node == node_cpuid[cpu].nid)
				cpu_map[unit++] = cpu;
	nr_units = unit;

	/* set basic parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
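	/*
	 * Each unit is exactly one PERCPU_PAGE_SIZE page: whatever the
	 * static image and the module reserve don't use is handed to the
	 * dynamic allocator, and a negative remainder means even the static
	 * data no longer fits in a percpu page.
	 */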
216 panic("percpu area overflow static=%zd reserved=%zd\n",
217 static_size
, reserved_size
);
219 ai
->static_size
= static_size
;
220 ai
->reserved_size
= reserved_size
;
221 ai
->dyn_size
= dyn_size
;
222 ai
->unit_size
= PERCPU_PAGE_SIZE
;
223 ai
->atom_size
= PAGE_SIZE
;
224 ai
->alloc_size
= PERCPU_PAGE_SIZE
;
227 * CPUs are put into groups according to node. Walk cpu_map
228 * and create new groups at node boundaries.
230 prev_node
= NUMA_NO_NODE
;
232 for (unit
= 0; unit
< nr_units
; unit
++) {
234 node
= node_cpuid
[cpu
].nid
;
236 if (node
== prev_node
) {
242 gi
= &ai
->groups
[ai
->nr_groups
++];
244 gi
->base_offset
= __per_cpu_offset
[cpu
] + base_offset
;
245 gi
->cpu_map
= &cpu_map
[unit
];
248 rc
= pcpu_setup_first_chunk(ai
, base
);
250 panic("failed to setup percpu area (err=%d)", rc
);
252 pcpu_free_alloc_info(ai
);
/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long spfn, epfn;
	unsigned long pernodesize = 0, pernode;

	spfn = start >> PAGE_SHIFT;
	epfn = (start + len) >> PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size;
	int node;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		memblock_reserve(base, size);
	}
}

static void __meminit scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here.
	 * node_online_map is not set for hot-added nodes at this time,
	 * because we are halfway through initialization of the new node's
	 * structures.  If for_each_online_node() is used, a new node's
	 * pg_data_ptrs will not be initialized. Instead of using it,
	 * pgdat_list[] is checked.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for_each_possible_early_cpu(cpu) {
		node = node_cpuid[cpu].nid;
		per_cpu(ia64_cpu_info, cpu).node_data =
			mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&ia64_cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node but fall back to any other node when __alloc_bootmem_node fails
 *	for best.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = NUMA_NO_NODE, node, anynode = 0;

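	/*
	 * Pick the memory-bearing node closest to @nid by SLIT distance and
	 * remember the last memory-bearing node seen as a fallback, in case
	 * no candidate beats the initial 'best' distance.
	 */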
	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == NUMA_NO_NODE)
		bestnode = anynode;

	ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
				     __pa(MAX_DMA_ADDRESS),
				     MEMBLOCK_ALLOC_ACCESSIBLE,
				     bestnode);
	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
		      __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
		      __pa(MAX_DMA_ADDRESS));

	return ptr;
}

/**
 * memory_less_nodes - allocate and initialize CPU only nodes pernode
 *	information.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}

/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();
	efi_memmap_walk(filter_memory, register_active_ranges);

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
	efi_memmap_walk(find_max_min_low_pfn, NULL);

	for_each_online_node(node)
		if (mem_data[node].min_pfn)
			node_clear(node, memory_less_mask);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;

	if (first_time) {
		first_time = 0;
		for_each_possible_early_cpu(cpu)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long pfn_offset = 0;
	unsigned long max_pfn = 0;
	int node;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

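	/*
	 * With CONFIG_VIRTUAL_MEM_MAP the struct page array sits at the top
	 * of the vmalloc space: VMALLOC_END is pulled down far enough to
	 * cover max_low_pfn pages, and create_mem_map_page_table() then maps
	 * that array for each block in the EFI memory map.
	 */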
#ifdef CONFIG_VIRTUAL_MEM_MAP
	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	vmem_map = (struct page *) VMALLOC_END;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

	for_each_online_node(node) {
		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		if (mem_data[node].max_pfn > max_pfn)
			max_pfn = mem_data[node].max_pfn;
	}

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;
	free_area_init_nodes(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

#ifdef CONFIG_MEMORY_HOTPLUG
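/*
 * Memory hot-add runs long after the boot allocator has been torn down, so
 * a hot-added node's per-node area comes from the slab here rather than
 * from memblock as in the boot-time path above.
 */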
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
	scatter_node_data();
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
}
#endif