/*
 * NUMA support for s390
 *
 * NUMA emulation (aka fake NUMA) distributes the available memory to nodes
 * without using real topology information about the physical memory of the
 * machine.
 *
 * It distributes the available CPUs to nodes while respecting the original
 * machine topology information. This is done by trying not to separate
 * CPUs which reside on the same book or even on the same MC.
 *
 * Because the current Linux scheduler code requires a stable cpu to node
 * mapping, cores are pinned to nodes when the first CPU thread is set online.
 *
 * Copyright IBM Corp. 2015
 */
#define KMSG_COMPONENT "numa_emu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include "numa_mode.h"
#include "toptree.h"
/* Distances between the different system components */
#define DIST_EMPTY	0	/* No unit */
#define DIST_CORE	1	/* Core within mc */
#define DIST_MC		2	/* MC within book */
#define DIST_BOOK	3	/* Book within node */
#define DIST_MAX	4	/* Init value */

/* Node distance reported to common code */
#define EMU_NODE_DIST	10

/* Node ID for free (not yet pinned) cores */
#define NODE_ID_FREE	-1
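/*
 * Example distances with the values above: cores on the same MC are
 * DIST_CORE apart, cores on different MCs within one book are DIST_MC
 * apart, and cores on different books are DIST_BOOK apart. An empty
 * node reports DIST_EMPTY (0) and is therefore always the preferred
 * target for the next unpinned core.
 */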
/* Different levels of toptree */
enum toptree_level {CORE, MC, BOOK, NODE, TOPOLOGY};
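/*
 * The levels above form the tree from leaf to root:
 * CORE -> MC -> BOOK -> NODE -> TOPOLOGY. A core's MC, book and node
 * are thus reached via one, two and three "parent" hops respectively
 * (see core_mc(), core_book() and core_node() below).
 */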
/* The two toptree IDs */
enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA};
/* Number of NUMA nodes */
static int emu_nodes = 1;

/* NUMA stripe size */
static unsigned long emu_size;
/*
 * Node to core pinning information updates are protected by
 * "sched_domains_mutex".
 */
static struct {
	s32 to_node_id[CONFIG_NR_CPUS];	/* Pinned core to node mapping */
	int total;			/* Total number of pinned cores */
	int per_node_target;		/* Cores per node without extra cores */
	int per_node[MAX_NUMNODES];	/* Number of cores pinned to node */
} *emu_cores;
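/*
 * "per_node_target" is the integer quotient of all known cores divided
 * by the number of nodes; node_for_core() may exceed it by at most
 * "extra" cores per node to place the remainder.
 */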
/*
 * Pin a core to a node
 */
static void pin_core_to_node(int core_id, int node_id)
{
	if (emu_cores->to_node_id[core_id] == NODE_ID_FREE) {
		emu_cores->per_node[node_id]++;
		emu_cores->to_node_id[core_id] = node_id;
		emu_cores->total++;
	} else {
		WARN_ON(emu_cores->to_node_id[core_id] != node_id);
	}
}
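/*
 * Note that pinning is sticky: once a core has been pinned, later
 * topology updates must map it to the same node again, otherwise the
 * WARN_ON() above fires.
 */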
/*
 * Number of pinned cores of a node
 */
static int cores_pinned(struct toptree *node)
{
	return emu_cores->per_node[node->id];
}
/*
 * ID of the node where the core is pinned (or NODE_ID_FREE)
 */
static int core_pinned_to_node_id(struct toptree *core)
{
	return emu_cores->to_node_id[core->id];
}
/*
 * Number of cores in the tree that are not yet pinned
 */
static int cores_free(struct toptree *tree)
{
	struct toptree *core;
	int count = 0;

	toptree_for_each(core, tree, CORE) {
		if (core_pinned_to_node_id(core) == NODE_ID_FREE)
			count++;
	}
	return count;
}
/*
 * Return node of core
 */
static struct toptree *core_node(struct toptree *core)
{
	return core->parent->parent->parent;
}
/*
 * Return book of core
 */
static struct toptree *core_book(struct toptree *core)
{
	return core->parent->parent;
}
/*
 * Return mc of core
 */
static struct toptree *core_mc(struct toptree *core)
{
	return core->parent;
}
/*
 * Distance between two cores
 */
static int dist_core_to_core(struct toptree *core1, struct toptree *core2)
{
	if (core_book(core1)->id != core_book(core2)->id)
		return DIST_BOOK;
	if (core_mc(core1)->id != core_mc(core2)->id)
		return DIST_MC;
	/* Same core or sibling on same MC */
	return DIST_CORE;
}
/*
 * Distance of a node to a core
 */
static int dist_node_to_core(struct toptree *node, struct toptree *core)
{
	struct toptree *core_node;
	int dist_min = DIST_MAX;

	toptree_for_each(core_node, node, CORE)
		dist_min = min(dist_min, dist_core_to_core(core_node, core));
	return dist_min == DIST_MAX ? DIST_EMPTY : dist_min;
}
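/*
 * Because DIST_EMPTY is zero, a node without any pinned core is always
 * at least as close as any populated node, so empty nodes are filled
 * first.
 */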
/*
 * Unify will delete empty nodes, therefore recreate nodes.
 */
static void toptree_unify_tree(struct toptree *tree)
{
	int nid;

	toptree_unify(tree);
	for (nid = 0; nid < emu_nodes; nid++)
		toptree_get_child(tree, nid);
}
/*
 * Find the best/nearest node for a given core and ensure that no node
 * gets more than "emu_cores->per_node_target + extra" cores.
 */
static struct toptree *node_for_core(struct toptree *numa, struct toptree *core,
				     int extra)
{
	struct toptree *node, *node_best = NULL;
	int dist_cur, dist_best, cores_target;

	cores_target = emu_cores->per_node_target + extra;
	dist_best = DIST_MAX;
	toptree_for_each(node, numa, NODE) {
		/* Already pinned cores must use their nodes */
		if (core_pinned_to_node_id(core) == node->id) {
			node_best = node;
			break;
		}
		/* Skip nodes that already have enough cores */
		if (cores_pinned(node) >= cores_target)
			continue;
		dist_cur = dist_node_to_core(node, core);
		if (dist_cur < dist_best) {
			dist_best = dist_cur;
			node_best = node;
		}
	}
	return node_best;
}
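/*
 * A NULL return means that the core is not yet pinned and all nodes
 * already reached "cores_target"; the callers handle this by retrying
 * with a larger "extra" value (see toptree_to_numa()).
 */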
/*
 * Find the best node for each core with respect to "extra" core count
 */
static void toptree_to_numa_single(struct toptree *numa, struct toptree *phys,
				   int extra)
{
	struct toptree *node, *core, *tmp;

	toptree_for_each_safe(core, tmp, phys, CORE) {
		node = node_for_core(numa, core, extra);
		if (!node)
			return;
		toptree_move(core, node);
		pin_core_to_node(core->id, node->id);
	}
}
/*
 * Move structures of given level to specified NUMA node
 */
static void move_level_to_numa_node(struct toptree *node, struct toptree *phys,
				    enum toptree_level level, bool perfect)
{
	int cores_free, cores_target = emu_cores->per_node_target;
	struct toptree *cur, *tmp;

	toptree_for_each_safe(cur, tmp, phys, level) {
		cores_free = cores_target - toptree_count(node, CORE);
		if (perfect) {
			if (cores_free == toptree_count(cur, CORE))
				toptree_move(cur, node);
		} else {
			if (cores_free >= toptree_count(cur, CORE))
				toptree_move(cur, node);
		}
	}
}
/*
 * Move structures of a given level to NUMA nodes. If "perfect" is specified
 * move only perfectly fitting structures. Otherwise move also smaller
 * than needed structures.
 */
static void move_level_to_numa(struct toptree *numa, struct toptree *phys,
			       enum toptree_level level, bool perfect)
{
	struct toptree *node;

	toptree_for_each(node, numa, NODE)
		move_level_to_numa_node(node, phys, level, perfect);
}
/*
 * For the first run try to move the big structures
 */
static void toptree_to_numa_first(struct toptree *numa, struct toptree *phys)
{
	struct toptree *core;

	/* Always try to move perfectly fitting structures first */
	move_level_to_numa(numa, phys, BOOK, true);
	move_level_to_numa(numa, phys, BOOK, false);
	move_level_to_numa(numa, phys, MC, true);
	move_level_to_numa(numa, phys, MC, false);
	/* Now pin all the moved cores */
	toptree_for_each(core, numa, CORE)
		pin_core_to_node(core->id, core_node(core)->id);
}
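/*
 * The order above implements a best-fit strategy: whole books first
 * (exact fits before partial fits), then MCs, so that the biggest
 * physical units stay together on one emulated node.
 */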
/*
 * Allocate new topology and create required nodes
 */
static struct toptree *toptree_new(int id, int nodes)
{
	struct toptree *tree;
	int nid;

	tree = toptree_alloc(TOPOLOGY, id);
	if (!tree)
		goto fail;
	for (nid = 0; nid < nodes; nid++) {
		if (!toptree_get_child(tree, nid))
			goto fail;
	}
	return tree;
fail:
	panic("NUMA emulation could not allocate topology");
}
/*
 * Allocate and initialize core to node mapping
 */
static void create_core_to_node_map(void)
{
	int i;

	emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
	if (emu_cores == NULL)
		panic("Could not allocate cores to node memory");
	for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
		emu_cores->to_node_id[i] = NODE_ID_FREE;
}
/*
 * Move cores from physical topology into NUMA target topology
 * and try to keep as much of the physical topology as possible.
 */
static struct toptree *toptree_to_numa(struct toptree *phys)
{
	static int first = 1;
	struct toptree *numa;
	int cores_total;

	cores_total = emu_cores->total + cores_free(phys);
	emu_cores->per_node_target = cores_total / emu_nodes;
	numa = toptree_new(TOPTREE_ID_NUMA, emu_nodes);
	if (first) {
		toptree_to_numa_first(numa, phys);
		first = 0;
	}
	toptree_to_numa_single(numa, phys, 0);
	toptree_to_numa_single(numa, phys, 1);
	toptree_unify_tree(numa);

	WARN_ON(cpumask_weight(&phys->mask));
	return numa;
}
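/*
 * With integer division, up to "emu_nodes - 1" remainder cores are left
 * over after the "extra = 0" pass; the "extra = 1" pass distributes
 * them, one additional core per node at most.
 */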
/*
 * Create a toptree out of the physical topology that we got from the
 * hypervisor
 */
static struct toptree *toptree_from_topology(void)
{
	struct toptree *phys, *node, *book, *mc, *core;
	struct cpu_topology_s390 *top;
	int cpu;

	phys = toptree_new(TOPTREE_ID_PHYS, 1);

	for_each_online_cpu(cpu) {
		top = &per_cpu(cpu_topology, cpu);
		node = toptree_get_child(phys, 0);
		book = toptree_get_child(node, top->book_id);
		mc = toptree_get_child(book, top->socket_id);
		core = toptree_get_child(mc, top->core_id);
		if (!book || !mc || !core)
			panic("NUMA emulation could not allocate memory");
		cpumask_set_cpu(cpu, &core->mask);
		toptree_update_mask(mc);
	}
	return phys;
}
/*
 * Add toptree core to topology and create correct CPU masks
 */
static void topology_add_core(struct toptree *core)
{
	struct cpu_topology_s390 *top;
	int cpu;

	for_each_cpu(cpu, &core->mask) {
		top = &per_cpu(cpu_topology, cpu);
		cpumask_copy(&top->thread_mask, &core->mask);
		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
		cpumask_copy(&top->book_mask, &core_book(core)->mask);
		cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
		top->node_id = core_node(core)->id;
	}
}
/*
 * Apply toptree to topology and create CPU masks
 */
static void toptree_to_topology(struct toptree *numa)
{
	struct toptree *core;
	int i;

	/* Clear all node masks */
	for (i = 0; i < MAX_NUMNODES; i++)
		cpumask_clear(node_to_cpumask_map[i]);

	/* Rebuild all masks */
	toptree_for_each(core, numa, CORE)
		topology_add_core(core);
}
/*
 * Show the node to core mapping
 */
static void print_node_to_core_map(void)
{
	int nid, cid;

	if (!numa_debug_enabled)
		return;
	printk(KERN_DEBUG "NUMA node to core mapping\n");
	for (nid = 0; nid < emu_nodes; nid++) {
		printk(KERN_DEBUG "  node %3d: ", nid);
		for (cid = 0; cid < ARRAY_SIZE(emu_cores->to_node_id); cid++) {
			if (emu_cores->to_node_id[cid] == nid)
				printk(KERN_CONT "%d ", cid);
		}
		printk(KERN_CONT "\n");
	}
}
/*
 * Transfer physical topology into a NUMA topology and modify CPU masks
 * according to the NUMA topology.
 *
 * Must be called with "sched_domains_mutex" lock held.
 */
static void emu_update_cpu_topology(void)
{
	struct toptree *phys, *numa;

	if (emu_cores == NULL)
		create_core_to_node_map();
	phys = toptree_from_topology();
	numa = toptree_to_numa(phys);
	toptree_free(phys);
	toptree_to_topology(numa);
	toptree_free(numa);
	print_node_to_core_map();
}
/*
 * If emu_size is not set, use CONFIG_EMU_SIZE. Then round to minimum
 * alignment (needed for memory hotplug).
 */
static unsigned long emu_setup_size_adjust(unsigned long size)
{
	size = size ? : CONFIG_EMU_SIZE;
	size = roundup(size, memory_block_size_bytes());
	return size;
}
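/*
 * For example, a requested stripe size of 600 MB on a machine with
 * 256 MB memory blocks would be rounded up to 768 MB. (The actual block
 * size comes from memory_block_size_bytes() and is machine dependent.)
 */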
/*
 * If there is not enough memory for the specified nodes, reduce the node
 * count.
 */
static int emu_setup_nodes_adjust(int nodes)
{
	int nodes_max;

	nodes_max = memblock.memory.total_size / emu_size;
	nodes_max = max(nodes_max, 1);
	if (nodes_max >= nodes)
		return nodes;
	pr_warn("Not enough memory for %d nodes, reducing node count\n", nodes);
	return nodes_max;
}
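/*
 * For example, requesting emu_nodes=8 with a 1 GB stripe on a 4 GB
 * machine yields nodes_max = 4, so the node count is reduced to 4.
 */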
/*
 * Initialize NUMA emulation: adjust the memory stripe size and node count
 */
static void emu_setup(void)
{
	emu_size = emu_setup_size_adjust(emu_size);
	emu_nodes = emu_setup_nodes_adjust(emu_nodes);
	pr_info("Creating %d nodes with memory stripe size %ld MB\n",
		emu_nodes, emu_size >> 20);
}
/*
 * Return node id for given page number
 */
static int emu_pfn_to_nid(unsigned long pfn)
{
	return (pfn / (emu_size >> PAGE_SHIFT)) % emu_nodes;
}
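/*
 * The memory stripes are interleaved across nodes. With emu_size = 1 GB
 * and emu_nodes = 4, pfns map as: [0,1G) -> node 0, [1G,2G) -> node 1,
 * [2G,3G) -> node 2, [3G,4G) -> node 3, [4G,5G) -> node 0 again, etc.
 */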
/*
 * Return minimum memory alignment (= NUMA stripe size)
 */
static unsigned long emu_align(void)
{
	return emu_size;
}
/*
 * Return distance between two nodes
 */
static int emu_distance(int node1, int node2)
{
	return (node1 != node2) * EMU_NODE_DIST;
}
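/*
 * Since the emulated nodes have no physical distance, all distinct
 * node pairs are reported as equally far apart (EMU_NODE_DIST).
 */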
/*
 * Define callbacks for generic s390 NUMA infrastructure
 */
const struct numa_mode numa_mode_emu = {
	.name = "emu",
	.setup = emu_setup,
	.update_cpu_topology = emu_update_cpu_topology,
	.__pfn_to_nid = emu_pfn_to_nid,
	.align = emu_align,
	.distance = emu_distance,
};
/*
 * Kernel parameter: emu_nodes=<n>
 */
static int __init early_parse_emu_nodes(char *p)
{
	int count;

	if (kstrtoint(p, 0, &count) != 0 || count <= 0)
		return 0;
	emu_nodes = min(count, MAX_NUMNODES);
	return 0;
}
early_param("emu_nodes", early_parse_emu_nodes);
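/*
 * For example, "emu_nodes=4" on the kernel command line creates four
 * emulated nodes (capped at MAX_NUMNODES). Selecting the emulation mode
 * itself is handled by the generic s390 NUMA code.
 */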
/*
 * Kernel parameter: emu_size=[<n>[k|M|G|T]]
 */
static int __init early_parse_emu_size(char *p)
{
	emu_size = memparse(p, NULL);
	return 0;
}
early_param("emu_size", early_parse_emu_size);
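/*
 * memparse() accepts the usual suffixes, so for example "emu_size=2G"
 * requests a 2 GB memory stripe (rounded up later by
 * emu_setup_size_adjust()).
 */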