// SPDX-License-Identifier: GPL-2.0
/*
 * NUMA support for s390
 *
 * Implement NUMA core code.
 *
 * Copyright IBM Corp. 2015
 */
#define KMSG_COMPONENT "numa"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/cpumask.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/node.h>

#include <asm/numa.h>
#include "numa_mode.h"

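/* Per node pg_data_t pointers, populated in numa_setup_memory() */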
pg_data_t *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

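/* cpumask of the CPUs assigned to each node, initialized in numa_init_early() */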
cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

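/*
 * plain_setup() - Setup routine for the default "plain" mode
 *
 * The plain mode knows a single node only; plain_setup() therefore
 * marks node 0 as the only possible node.
 */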
static void plain_setup(void)
{
	node_set(0, node_possible_map);
}

const struct numa_mode numa_mode_plain = {
	.name = "plain",
	.setup = plain_setup,
};

static const struct numa_mode *mode = &numa_mode_plain;

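/*
 * numa_pfn_to_nid() - Translate a page frame number to a node id
 *
 * Falls back to node 0 when the active mode provides no __pfn_to_nid
 * callback.
 */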
int numa_pfn_to_nid(unsigned long pfn)
{
	return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0;
}

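/*
 * numa_update_cpu_topology() - Let the active mode update the CPU topology
 *
 * Does nothing when the mode provides no update_cpu_topology callback.
 */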
void numa_update_cpu_topology(void)
{
	if (mode->update_cpu_topology)
		mode->update_cpu_topology();
}

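/*
 * __node_distance() - Return the distance between two nodes
 *
 * A distance of 0 is reported when the active mode provides no distance
 * callback.
 */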
int __node_distance(int a, int b)
{
	return mode->distance ? mode->distance(a, b) : 0;
}
EXPORT_SYMBOL(__node_distance);

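/* Set when "numa_debug" is passed on the kernel command line, see parse_debug() */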
int numa_debug_enabled;

/*
 * alloc_node_data() - Allocate node data
 */
static __init pg_data_t *alloc_node_data(void)
{
	pg_data_t *res;

	res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8);
	memset(res, 0, sizeof(pg_data_t));
	return res;
}

/*
 * numa_setup_memory() - Assign bootmem to nodes
 *
 * The memory is first added to memblock without any respect to nodes.
 * This is fixed before remaining memblock memory is handed over to the
 * buddy allocator.
 * An important side effect is that large bootmem allocations might easily
 * cross node boundaries, which can be needed for large allocations with
 * smaller memory stripes in each node (i.e. when using NUMA emulation).
 *
 * Memory defines nodes:
 * Therefore this routine also sets the nodes online with memory.
 */
static void __init numa_setup_memory(void)
{
	unsigned long cur_base, align, end_of_dram;
	int nid = 0;

	end_of_dram = memblock_end_of_DRAM();
	align = mode->align ? mode->align() : ULONG_MAX;

	/*
	 * Step through all available memory and assign it to the nodes
	 * indicated by the mode implementation.
	 * All nodes which are seen here will be set online.
	 */
	cur_base = 0;
	do {
		nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
		node_set_online(nid);
		memblock_set_node(cur_base, align, &memblock.memory, nid);
		cur_base += align;
	} while (cur_base < end_of_dram);

	/* Allocate and fill out node_data */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		NODE_DATA(nid) = alloc_node_data();

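	/* Derive each node's spanned page range from its memblock regions */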
	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long t_start, t_end;
		int i;

		start_pfn = ULONG_MAX;
		end_pfn = 0;
		for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
			if (t_start < start_pfn)
				start_pfn = t_start;
			if (t_end > end_pfn)
				end_pfn = t_end;
		}
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
		NODE_DATA(nid)->node_id = nid;
	}
}

/*
 * numa_setup() - Earliest initialization
 *
 * Assign the mode and call the mode's setup routine.
 */
void __init numa_setup(void)
{
	pr_info("NUMA mode: %s\n", mode->name);
	nodes_clear(node_possible_map);
	mode->setup();
	numa_setup_memory();
	memblock_dump_all();
}

/*
 * numa_init_early() - Initialization initcall
 *
 * This runs when only one CPU is online and before the first
 * topology update is called for by the scheduler.
 */
static int __init numa_init_early(void)
{
	/* Attach all possible CPUs to node 0 for now. */
	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
	return 0;
}
early_initcall(numa_init_early);

/*
 * numa_init_late() - Initialization initcall
 *
 * Register NUMA nodes.
 */
static int __init numa_init_late(void)
{
	int nid;

	for_each_online_node(nid)
		register_one_node(nid);
	return 0;
}
arch_initcall(numa_init_late);

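/*
 * parse_debug() - Parse the "numa_debug" kernel parameter
 */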
static int __init parse_debug(char *parm)
{
	numa_debug_enabled = 1;
	return 0;
}
early_param("numa_debug", parse_debug);

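/*
 * parse_numa() - Parse the "numa=" kernel parameter and select the NUMA mode
 *
 * Unrecognized values leave the default (plain) mode selected.
 */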
static int __init parse_numa(char *parm)
{
	if (strcmp(parm, numa_mode_plain.name) == 0)
		mode = &numa_mode_plain;
#ifdef CONFIG_NUMA_EMU
	if (strcmp(parm, numa_mode_emu.name) == 0)
		mode = &numa_mode_emu;
#endif
	return 0;
}
early_param("numa", parse_numa);