/*
 * NUMA support for s390
 *
 * Implement NUMA core code.
 *
 * Copyright IBM Corp. 2015
 */

#define KMSG_COMPONENT "numa"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/cpumask.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/node.h>

#include <asm/numa.h>
#include "numa_mode.h"

pg_data_t *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

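/*
 * plain_setup() - Setup routine for the default "plain" mode
 *
 * Plain mode marks only node 0 as possible; all memory and CPUs
 * are kept on that single node.
 */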
static void plain_setup(void)
{
	node_set(0, node_possible_map);
}

const struct numa_mode numa_mode_plain = {
	.name = "plain",
	.setup = plain_setup,
};

static const struct numa_mode *mode = &numa_mode_plain;

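/*
 * numa_pfn_to_nid() - Map a page frame number to a node
 *
 * Falls back to node 0 if the active mode provides no mapping.
 */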
int numa_pfn_to_nid(unsigned long pfn)
{
	return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0;
}

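/*
 * numa_update_cpu_topology() - Forward CPU topology updates to the
 * active mode, if it implements an update callback.
 */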
void numa_update_cpu_topology(void)
{
	if (mode->update_cpu_topology)
		mode->update_cpu_topology();
}

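/*
 * __node_distance() - Distance between two nodes
 *
 * Returns 0 unless the active mode supplies its own distance function.
 */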
int __node_distance(int a, int b)
{
	return mode->distance ? mode->distance(a, b) : 0;
}

int numa_debug_enabled;

/*
 * alloc_node_data() - Allocate node data
 */
static __init pg_data_t *alloc_node_data(void)
{
	pg_data_t *res;

	res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8);
	memset(res, 0, sizeof(pg_data_t));
	return res;
}

/*
 * numa_setup_memory() - Assign bootmem to nodes
 *
 * The memory is first added to memblock without any respect to nodes.
 * This is fixed before remaining memblock memory is handed over to the
 * buddy allocator.
 * An important side effect is that large bootmem allocations might easily
 * cross node boundaries, which can be needed for large allocations with
 * smaller memory stripes in each node (i.e. when using NUMA emulation).
 *
 * Memory defines nodes:
 * Therefore this routine also sets the nodes online with memory.
 */
static void __init numa_setup_memory(void)
{
	unsigned long cur_base, align, end_of_dram;
	int nid = 0;

	end_of_dram = memblock_end_of_DRAM();
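	/*
	 * Without an align callback a single ULONG_MAX sized stripe is
	 * used, i.e. all of memory is assigned to one node.
	 */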
	align = mode->align ? mode->align() : ULONG_MAX;

	/*
	 * Step through all available memory and assign it to the nodes
	 * indicated by the mode implementation.
	 * All nodes which are seen here will be set online.
	 */
	cur_base = 0;
	do {
		nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
		node_set_online(nid);
		memblock_set_node(cur_base, align, &memblock.memory, nid);
		cur_base += align;
	} while (cur_base < end_of_dram);

	/* Allocate and fill out node_data */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		NODE_DATA(nid) = alloc_node_data();

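	/* Derive each online node's spanned page range from its memblock regions. */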
	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long t_start, t_end;
		int i;

		start_pfn = ULONG_MAX;
		end_pfn = 0;
		for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
			if (t_start < start_pfn)
				start_pfn = t_start;
			if (t_end > end_pfn)
				end_pfn = t_end;
		}
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
		NODE_DATA(nid)->node_id = nid;
	}
}

/*
 * numa_setup() - Earliest initialization
 *
 * Assign the mode and call the mode's setup routine.
 */
void __init numa_setup(void)
{
	pr_info("NUMA mode: %s\n", mode->name);
	nodes_clear(node_possible_map);
	mode->setup();
	numa_setup_memory();
	memblock_dump_all();
}

/*
 * numa_init_early() - Initialization initcall
 *
 * This runs when only one CPU is online and before the first
 * topology update is called for by the scheduler.
 */
static int __init numa_init_early(void)
{
	/* Attach all possible CPUs to node 0 for now. */
	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
	return 0;
}
early_initcall(numa_init_early);

/*
 * numa_init_late() - Initialization initcall
 *
 * Register NUMA nodes.
 */
static int __init numa_init_late(void)
{
	int nid;

	for_each_online_node(nid)
		register_one_node(nid);
	return 0;
}
arch_initcall(numa_init_late);

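/* "numa_debug" kernel parameter: set the numa_debug_enabled flag. */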
static int __init parse_debug(char *parm)
{
	numa_debug_enabled = 1;
	return 0;
}
early_param("numa_debug", parse_debug);

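/* "numa=<mode>" kernel parameter: select the NUMA mode by its name. */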
static int __init parse_numa(char *parm)
{
	if (strcmp(parm, numa_mode_plain.name) == 0)
		mode = &numa_mode_plain;
#ifdef CONFIG_NUMA_EMU
	if (strcmp(parm, numa_mode_emu.name) == 0)
		mode = &numa_mode_emu;
#endif
	return 0;
}
early_param("numa", parse_numa);