// SPDX-License-Identifier: GPL-2.0-only
/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>
#include <linux/sort.h>
#include <linux/numa_memblks.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
	if (!strncmp(opt, "fake=", 5))
		return numa_emu_cmdline(opt + 5);
	if (!strncmp(opt, "noacpi", 6))
		disable_srat();
	if (!strncmp(opt, "nohmat", 6))
		disable_hmat();
	return 0;
}
early_param("numa", numa_setup);

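/*
 * Illustrative examples (not part of the handler above, shown here for
 * reference): each "numa=" early parameter is routed through numa_setup(),
 * so for instance
 *
 *	numa=off	- set numa_off, disabling NUMA entirely
 *	numa=fake=4	- hand "4" to numa_emu_cmdline() to emulate 4 nodes
 *	numa=noacpi	- disable_srat(): ignore the ACPI SRAT table
 *	numa=nohmat	- disable_hmat(): ignore the ACPI HMAT table
 */
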
/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

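/*
 * Worked example (hypothetical values): if firmware reported local APIC
 * id 2 as belonging to node 1, i.e. __apicid_to_node[2] == 1, a CPU whose
 * x86_cpu_to_apicid entry is 2 resolves to node 1 here; a CPU whose entry
 * is still BAD_APICID resolves to NUMA_NO_NODE.
 */
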
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

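/*
 * Illustrative call sequence (hypothetical values; setup_per_cpu_areas()
 * is assumed to be the point where the early map is retired):
 *
 *	numa_set_node(0, 1);	// early: writes the static early map
 *	setup_per_cpu_areas();	// percpu areas come up, early ptr cleared
 *	numa_set_node(0, 1);	// late: writes per_cpu(x86_cpu_to_node_map, 0)
 */
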
void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

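/*
 * Usage sketch (illustrative): once the masks are allocated, per-node CPU
 * lists can be walked the usual way, e.g.
 *
 *	const struct cpumask *mask = cpumask_of_node(nid);
 *	unsigned int cpu;
 *
 *	for_each_cpu(cpu, mask)
 *		pr_info("cpu %u is on node %d\n", cpu, nid);
 */
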
static int __init numa_register_nodes(void)
{
	int nid;

	if (!memblock_validate_numa_coverage(SZ_1M))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Note, get_pfn_range_for_nid() depends on
		 * memblock_set_node() having already happened
		 */
		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		if (start_pfn >= end_pfn)
			continue;

		alloc_node_data(nid);
		node_set_online(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

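/*
 * Coverage note (a reading of the check above, not a guarantee): passing
 * SZ_1M to memblock_validate_numa_coverage() tolerates up to 1 MiB of
 * memory left without a node assignment; beyond that the whole attempt
 * fails with -EINVAL and x86_numa_init() moves on to the next method.
 */
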
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}

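/*
 * Worked example (hypothetical values): with nodes {0, 1} online and four
 * possible CPUs all still at NUMA_NO_NODE, the loop above assigns
 *
 *	cpu 0 -> node 0, cpu 1 -> node 1, cpu 2 -> node 0, cpu 3 -> node 1
 *
 * because next_node_in() wraps around node_online_map.
 */
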
static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true);
	if (ret < 0)
		return ret;

	ret = numa_register_nodes();
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory. This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

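/*
 * Example boot log (illustrative, assuming exactly 4 GiB of RAM and no
 * NUMA tables; real machines will differ):
 *
 *	No NUMA configuration found
 *	Faking a node at [mem 0x0000000000000000-0x00000000ffffffff]
 */
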
/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds. The
 * last fallback is dummy single node config encompassing whole memory and
 * never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
		if (acpi_disabled && !numa_init(of_numa_init))
			return;
	}

	numa_init(dummy_numa_init);
}

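/*
 * Resulting probe order (as wired above): ACPI SRAT via
 * x86_acpi_numa_init() when CONFIG_ACPI_NUMA, then the AMD northbridge
 * scan via amd_numa_init() when CONFIG_AMD_NUMA, then devicetree via
 * of_numa_init() when ACPI is disabled, and finally dummy_numa_init(),
 * which never fails.
 */
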
/*
 * A node may exist which has one or more Generic Initiators but no CPUs and no
 * memory.
 *
 * This function must be called after init_cpu_to_node(), to ensure that any
 * memoryless CPU nodes have already been brought online, and before the
 * node_data[nid] is needed for zone list setup in build_all_zonelists().
 *
 * When this function is called, any nodes containing either memory and/or CPUs
 * will already be online and there is no need to do anything extra, even if
 * they also contain one or more Generic Initiators.
 */
void __init init_gi_nodes(void)
{
	int nid;

	/*
	 * Exclude this node from
	 *   bringup_nonboot_cpus
	 *    cpu_up
	 *     __try_online_node
	 *      register_one_node
	 * because node_subsys is not initialized yet.
	 * TODO remove dependency on node_online
	 */
	for_each_node_state(nid, N_GENERIC_INITIATOR)
		if (!node_online(nid))
			node_set_online(nid);
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u32 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		/*
		 * Exclude this node from
		 *   bringup_nonboot_cpus
		 *    cpu_up
		 *     __try_online_node
		 *      register_one_node
		 * because node_subsys is not initialized yet.
		 * TODO remove dependency on node_online
		 */
		if (!node_online(node))
			node_set_online(node);

		numa_set_node(cpu, node);
	}
}

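/*
 * Worked example (hypothetical values): if cpu_to_apicid[3] == 6 and
 * __apicid_to_node[6] == 2, the loop above onlines node 2 if necessary
 * and records the mapping with numa_set_node(3, 2).
 */
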
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(unsigned int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(unsigned int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!cpumask_available(mask)) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, cpumask_pr_args(mask));
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(unsigned int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(unsigned int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if ((unsigned)node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (!cpumask_available(node_to_cpumask_map[node])) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

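/*
 * Usage sketch (illustrative, hypothetical caller): because the error
 * paths above return valid masks, callers can chain without NULL checks,
 * e.g. to pick an online CPU near a device's node:
 *
 *	unsigned int cpu = cpumask_any_and(cpumask_of_node(node),
 *					   cpu_online_mask);
 *	if (cpu < nr_cpu_ids)
 *		queue_work_on(cpu, wq, work);
 */
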
#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_EMU
void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
					unsigned int nr_emu_nids)
{
	int i, j;

	/*
	 * Transform __apicid_to_node table to use emulated nids by
	 * reverse-mapping phys_nid. The maps should always exist but fall
	 * back to zero just in case.
	 */
	for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
		if (__apicid_to_node[i] == NUMA_NO_NODE)
			continue;
		for (j = 0; j < nr_emu_nids; j++)
			if (__apicid_to_node[i] == emu_nid_to_phys[j])
				break;
		__apicid_to_node[i] = j < nr_emu_nids ? j : 0;
	}
}

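/*
 * Worked example (hypothetical values): with emu_nid_to_phys[] =
 * { 0, 0, 1, 1 }, i.e. four emulated nodes carved out of two physical
 * ones, an APIC id that mapped to physical node 1 is rewritten to
 * emulated node 2, the first j with emu_nid_to_phys[j] == 1.
 */
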
u64 __init numa_emu_dma_end(void)
{
	return PFN_PHYS(MAX_DMA32_PFN);
}
#endif /* CONFIG_NUMA_EMU */