// SPDX-License-Identifier: GPL-2.0
/*
 * NUMA emulation
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/memblock.h>
#include <linux/numa_memblks.h>
#include <asm/numa.h>
#include <acpi/acpi_numa.h>

#define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
#define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))

int emu_nid_to_phys[MAX_NUMNODES];

static char *emu_cmdline __initdata;
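
/*
 * The raw numa=fake= option string is stashed here and parsed later by
 * numa_emulation().  Three forms are recognized by the parsing below:
 *   numa=fake=<N>        split system RAM into N interleaved nodes
 *   numa=fake=<size>M|G  carve all nodes out at the given fixed size
 *   numa=fake=<N>U       split each physical node into N uniform nodes
 */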
int __init numa_emu_cmdline(char *str)
{
	emu_cmdline = str;
	return 0;
}

static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].nid == nid)
			return i;
	return -ENOENT;
}
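
/* Bytes of memory in [@start, @end) not backed by RAM, in whole pages. */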
static u64 __init mem_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (start_pfn < end_pfn)
		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
	return 0;
}

/*
 * Sets up an emulated node @nid spanning @size bytes carved from the head of
 * physical block @phys_blk.  The return value is -errno if something went
 * wrong, 0 otherwise.
 */
static int __init emu_setup_memblk(struct numa_meminfo *ei,
				   struct numa_meminfo *pi,
				   int nid, int phys_blk, u64 size)
{
	struct numa_memblk *eb = &ei->blk[ei->nr_blks];
	struct numa_memblk *pb = &pi->blk[phys_blk];

	if (ei->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: Too many emulated memblks, failing emulation\n");
		return -EINVAL;
	}

	ei->nr_blks++;
	eb->start = pb->start;
	eb->end = pb->start + size;
	eb->nid = nid;

	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
		emu_nid_to_phys[nid] = pb->nid;

	/* the emulated block consumes the head of the physical block */
	pb->start += size;
	if (pb->start >= pb->end) {
		WARN_ON_ONCE(pb->start > pb->end);
		numa_remove_memblk_from(phys_blk, pi);
	}

	printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
	       nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
	return 0;
}
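
/*
 * Illustrative example: carving 2GiB off a physical block spanning [0, 8GiB)
 * yields emulated block [0, 2GiB) and shrinks the physical block to
 * [2GiB, 8GiB); a fully consumed physical block is dropped from @pi.
 */
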
/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from
 * @addr to @max_addr.
 *
 * Returns zero on success or negative on error.
 */
static int __init split_nodes_interleave(struct numa_meminfo *ei,
					 struct numa_meminfo *pi,
					 u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = numa_nodes_parsed;
	u64 size;
	int big;
	int nid = 0;
	int i, ret;

	if (nr_nodes <= 0)
		return -1;

	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	/*
	 * Calculate target node size.  x86_32 freaks on __udivdi3() so do
	 * the division in ulong number of pages and convert back.
	 */
	size = max_addr - addr - mem_hole_size(addr, max_addr);
	size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);

	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node.  NUMA emulation disabled.\n");
		return -1;
	}

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (!nodes_empty(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = numa_emu_dma_end();
			u64 start, limit, end;
			int phys_blk;

			/* skip this physical node if it has no memory left */
			phys_blk = emu_find_memblk_by_nid(i, pi);
			if (phys_blk < 0) {
				node_clear(i, physnode_mask);
				continue;
			}

			start = pi->blk[phys_blk].start;
			limit = pi->blk[phys_blk].end;
			end = start + size;

			if (nid < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - start - mem_hole_size(start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > limit) {
					end = limit;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (limit - end - mem_hole_size(end, limit) < size)
				end = limit;

			ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
					       phys_blk,
					       min(end, limit) - start);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
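
/*
 * Illustrative example (ignoring holes): numa=fake=4 on a machine with two
 * 4GiB physical nodes yields a ~2GiB per-node size; each pass over the
 * physical nodes deals out one fake node apiece, ending with emulated nodes
 * 0 and 2 on the first physical node and 1 and 3 on the second.
 */
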
/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - mem_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
			end = max_addr;
			break;
		}
	}
	return end;
}
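
/* Even split of (@max_addr - @base - @hole) among @nr_nodes, in whole pages. */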
static u64 uniform_size(u64 max_addr, u64 base, u64 hole, int nr_nodes)
{
	unsigned long max_pfn = PHYS_PFN(max_addr);
	unsigned long base_pfn = PHYS_PFN(base);
	unsigned long hole_pfns = PHYS_PFN(hole);

	return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.
 *
 * Returns the next unused emulated nid on success or negative on error.
 */
static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
						      struct numa_meminfo *pi,
						      u64 addr, u64 max_addr, u64 size,
						      int nr_nodes, struct numa_memblk *pblk,
						      int nid)
{
	nodemask_t physnode_mask = numa_nodes_parsed;
	int i, ret, uniform = 0;
	u64 min_size;

	if ((!size && !nr_nodes) || (nr_nodes && !pblk))
		return -1;

	/*
	 * In the 'uniform' case split the passed in physical node by
	 * nr_nodes, in the non-uniform case, ignore the passed in
	 * physical block and try to create nodes of at least size
	 * @size.
	 *
	 * In the uniform case, split the nodes strictly by physical
	 * capacity, i.e. ignore holes.  In the non-uniform case account
	 * for holes and treat @size as a minimum floor.
	 */
	if (!nr_nodes)
		nr_nodes = MAX_NUMNODES;
	else {
		nodes_clear(physnode_mask);
		node_set(pblk->nid, physnode_mask);
		uniform = 1;
	}

	if (uniform) {
		min_size = uniform_size(max_addr, addr, 0, nr_nodes);
		size = min_size;
	} else {
		/*
		 * The limit on emulated nodes is MAX_NUMNODES, so the
		 * size per node is increased accordingly if the
		 * requested size is too small.  This creates a uniform
		 * distribution of node sizes across the entire machine
		 * (but not necessarily over physical nodes).
		 */
		min_size = uniform_size(max_addr, addr,
					mem_hole_size(addr, max_addr), nr_nodes);
	}
	min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
			size >> 20, min_size >> 20);
		size = min_size;
	}
	size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);

	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (!nodes_empty(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = numa_emu_dma_end();
			u64 start, limit, end;
			int phys_blk;

			/* skip this physical node if it has no memory left */
			phys_blk = emu_find_memblk_by_nid(i, pi);
			if (phys_blk < 0) {
				node_clear(i, physnode_mask);
				continue;
			}

			start = pi->blk[phys_blk].start;
			limit = pi->blk[phys_blk].end;

			if (uniform)
				end = start + size;
			else
				end = find_end_of_node(start, limit, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if ((limit - end - mem_hole_size(end, limit) < size)
					&& !uniform)
				end = limit;

			ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
					       phys_blk,
					       min(end, limit) - start);
			if (ret < 0)
				return ret;
		}
	}
	return nid;
}
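
/*
 * Illustrative example: with nr_nodes = 2 and @pblk covering one whole
 * physical node, the uniform path above splits that node into two equal
 * emulated nodes.  This is the numa=fake=<N>U case, which numa_emulation()
 * below applies to each physical node in turn.
 */
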
static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
					      struct numa_meminfo *pi,
					      u64 addr, u64 max_addr, u64 size)
{
	return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
						   0, NULL, 0);
}
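
/*
 * Find the highest emulated nid that received a mapping and pick a default
 * physical nid for emulated nodes that are still unmapped.
 */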
static int __init setup_emu2phys_nid(int *dfl_phys_nid)
{
	int i, max_emu_nid = 0;

	*dfl_phys_nid = NUMA_NO_NODE;
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) {
		if (emu_nid_to_phys[i] != NUMA_NO_NODE) {
			max_emu_nid = i;
			if (*dfl_phys_nid == NUMA_NO_NODE)
				*dfl_phys_nid = emu_nid_to_phys[i];
		}
	}

	return max_emu_nid;
}

/**
 * numa_emulation - Emulate NUMA nodes
 * @numa_meminfo: NUMA configuration to massage
 * @numa_dist_cnt: The size of the physical NUMA distance table
 *
 * Emulate NUMA nodes according to the numa=fake kernel parameter.
 * @numa_meminfo contains the physical memory configuration and is modified
 * to reflect the emulated configuration on success.  @numa_dist_cnt is
 * used to determine the size of the physical distance table.
 *
 * On success, the following modifications are made.
 *
 * - @numa_meminfo is updated to reflect the emulated nodes.
 *
 * - __apicid_to_node[] is updated such that APIC IDs are mapped to the
 *   emulated nodes.
 *
 * - NUMA distance table is rebuilt to represent distances between emulated
 *   nodes.  The distances are determined considering how emulated nodes
 *   are mapped to physical nodes and match the actual distances.
 *
 * - emu_nid_to_phys[] reflects how emulated nodes are mapped to physical
 *   nodes.  This is used by numa_add_cpu() and numa_remove_cpu().
 *
 * If emulation is not enabled or fails, emu_nid_to_phys[] is filled with
 * identity mapping and no other modification is made.
 */
void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
{
	static struct numa_meminfo ei __initdata;
	static struct numa_meminfo pi __initdata;
	const u64 max_addr = PFN_PHYS(max_pfn);
	u8 *phys_dist = NULL;
	size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]);
	int max_emu_nid, dfl_phys_nid;
	int i, j, ret;
	nodemask_t physnode_mask = numa_nodes_parsed;

	if (!emu_cmdline)
		goto no_emu;

	memset(&ei, 0, sizeof(ei));
	pi = *numa_meminfo;

	for (i = 0; i < MAX_NUMNODES; i++)
		emu_nid_to_phys[i] = NUMA_NO_NODE;

	/*
	 * If the numa=fake command-line contains a 'M' or 'G', it represents
	 * the fixed node size.  Otherwise, if it is just a single number N,
	 * split the system RAM into N fake nodes.
	 */
	if (strchr(emu_cmdline, 'U')) {
		unsigned long n;
		int nid = 0;

		n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
		ret = -1;
		for_each_node_mask(i, physnode_mask) {
			/*
			 * We pass in blk[0] because numa_remove_memblk_from(),
			 * called by emu_setup_memblk(), deletes entry 0 and
			 * then moves everything else up in the pi.blk array.
			 * Therefore we should always be looking at blk[0].
			 */
			ret = split_nodes_size_interleave_uniform(&ei, &pi,
					pi.blk[0].start, pi.blk[0].end, 0,
					n, &pi.blk[0], nid);
			if (ret < 0)
				break;
			if (ret < n) {
				pr_info("%s: phys: %d only got %d of %ld nodes, failing\n",
					__func__, i, ret, n);
				ret = -1;
				break;
			}
			nid = ret;
		}
	} else if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
		u64 size;

		size = memparse(emu_cmdline, &emu_cmdline);
		ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size);
	} else {
		unsigned long n;

		n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
		ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n);
	}
	if (*emu_cmdline == ':')
		emu_cmdline++;

	if (ret < 0)
		goto no_emu;

	if (numa_cleanup_meminfo(&ei) < 0) {
		pr_warn("NUMA: Warning: constructed meminfo invalid, disabling emulation\n");
		goto no_emu;
	}

	/* copy the physical distance table */
	if (numa_dist_cnt) {
		phys_dist = memblock_alloc(phys_size, PAGE_SIZE);
		if (!phys_dist) {
			pr_warn("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
			goto no_emu;
		}

		for (i = 0; i < numa_dist_cnt; i++)
			for (j = 0; j < numa_dist_cnt; j++)
				phys_dist[i * numa_dist_cnt + j] =
					node_distance(i, j);
	}

	/*
	 * Determine the max emulated nid and the default phys nid to use
	 * for unmapped nodes.
	 */
	max_emu_nid = setup_emu2phys_nid(&dfl_phys_nid);

	/* Make sure numa_nodes_parsed only contains emulated nodes */
	nodes_clear(numa_nodes_parsed);
	for (i = 0; i < ARRAY_SIZE(ei.blk); i++)
		if (ei.blk[i].start != ei.blk[i].end &&
		    ei.blk[i].nid != NUMA_NO_NODE)
			node_set(ei.blk[i].nid, numa_nodes_parsed);

	/*
	 * Fix pxm_to_node_map[] and node_to_pxm_map[] to avoid collision
	 * with faked numa nodes, particularly during later memory hotplug
	 * handling, and also update numa_nodes_parsed accordingly.
	 */
	ret = fix_pxm_node_maps(max_emu_nid);
	if (ret < 0)
		goto no_emu;

	/* commit */
	*numa_meminfo = ei;

	numa_emu_update_cpu_to_node(emu_nid_to_phys, max_emu_nid + 1);

	/* make sure all emulated nodes are mapped to a physical node */
	for (i = 0; i < max_emu_nid + 1; i++)
		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
			emu_nid_to_phys[i] = dfl_phys_nid;

	/* transform distance table */
	numa_reset_distance();
	for (i = 0; i < max_emu_nid + 1; i++) {
		for (j = 0; j < max_emu_nid + 1; j++) {
			int physi = emu_nid_to_phys[i];
			int physj = emu_nid_to_phys[j];
			int dist;

			if (get_option(&emu_cmdline, &dist) == 2)
				;
			else if (physi >= numa_dist_cnt || physj >= numa_dist_cnt)
				dist = physi == physj ?
					LOCAL_DISTANCE : REMOTE_DISTANCE;
			else
				dist = phys_dist[physi * numa_dist_cnt + physj];

			numa_set_distance(i, j, dist);
		}
	}
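
	/*
	 * Entries beyond the emulated range (no emu_nid_to_phys[] mapping)
	 * still refer to physical nodes; recover their distances from the
	 * saved physical table.
	 */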
	for (i = 0; i < numa_distance_cnt; i++) {
		for (j = 0; j < numa_distance_cnt; j++) {
			int physi, physj;
			u8 dist;

			/* distance between fake nodes is already ok */
			if (emu_nid_to_phys[i] != NUMA_NO_NODE &&
			    emu_nid_to_phys[j] != NUMA_NO_NODE)
				continue;
			if (emu_nid_to_phys[i] != NUMA_NO_NODE)
				physi = emu_nid_to_phys[i];
			else
				physi = i - max_emu_nid;
			if (emu_nid_to_phys[j] != NUMA_NO_NODE)
				physj = emu_nid_to_phys[j];
			else
				physj = j - max_emu_nid;
			dist = phys_dist[physi * numa_dist_cnt + physj];
			numa_set_distance(i, j, dist);
		}
	}

	/* free the copied physical distance table */
	memblock_free(phys_dist, phys_size);
	return;

no_emu:
	numa_nodes_parsed = physnode_mask;
	/* No emulation.  Build identity emu_nid_to_phys[] for numa_add_cpu() */
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
		emu_nid_to_phys[i] = i;
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS
void numa_add_cpu(unsigned int cpu)
{
	int physnid, nid;

	nid = early_cpu_to_node(cpu);
	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

	physnid = emu_nid_to_phys[nid];

	/*
	 * Map the cpu to each emulated node that is allocated on the physical
	 * node of the cpu's apic id.
	 */
	for_each_online_node(nid)
		if (emu_nid_to_phys[nid] == physnid)
			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}

void numa_remove_cpu(unsigned int cpu)
{
	int i;

	for_each_online_node(i)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
static void numa_set_cpumask(unsigned int cpu, bool enable)
{
	int nid, physnid;

	nid = early_cpu_to_node(cpu);
	if (nid == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}

	physnid = emu_nid_to_phys[nid];

	for_each_online_node(nid) {
		if (emu_nid_to_phys[nid] != physnid)
			continue;

		debug_cpumask_set_cpu(cpu, nid, enable);
	}
}

void numa_add_cpu(unsigned int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(unsigned int cpu)
{
	numa_set_cpumask(cpu, false);
}
#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */