/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

struct memnode memnode;

s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_off __initdata;
static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
                                      int numnodes, int shift, int *nodeids)
{
        unsigned long addr, end;
        int i, res = -1;

        memset(memnodemap, 0xff, sizeof(s16) * memnodemapsize);
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != NUMA_NO_NODE)
                                return -1;

                        if (!nodeids)
                                memnodemap[addr >> shift] = i;
                        else
                                memnodemap[addr >> shift] = nodeids[i];

                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}

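/*
 * Illustrative note for populate_memnodemap() above (example values, not
 * from this file): with shift = 24 each memnodemap[] slot covers a 16MB
 * chunk of physical address space, so a node spanning 0x1000000-0x3000000
 * fills slots 1 and 2 with its node id, and a later physical-address
 * lookup is a single array index.
 */
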
static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
                return 0;

        addr = 0x8000;
        nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
        nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT,
                                              nodemap_size, L1_CACHE_BYTES);
        if (nodemap_addr == MEMBLOCK_ERROR) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        memnodemap = phys_to_virt(nodemap_addr);
        memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}

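/*
 * Sizing sketch for allocate_cachealigned_memnodemap() above (numbers are
 * only an example): one s16 per (1 << shift) chunk means about 64GB of RAM
 * with shift = 24 needs roughly 4K entries (~8KB).  Small maps stay in
 * memnode.embedded_map; larger ones are taken from memblock, cache-line
 * aligned.
 */
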
/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
                                         int numnodes)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < numnodes; i++) {
                start = nodes[i].start;
                end = nodes[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = 63;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i) + 1;
        return i;
}

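/*
 * Example for extract_lsb_from_nodes() above (addresses assumed for
 * illustration): node starts of 0x0, 0x10000000 and 0x20000000 OR together
 * to 0x30000000, whose lowest set bit is 28, so the shift is 28 and
 * memnodemapsize becomes (memtop >> 28) + 1.  A single usable node forces
 * the shift to 63.
 */
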
int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
                              int *nodeids)
{
        int shift;

        shift = extract_lsb_from_nodes(nodes, numnodes);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

        if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
                printk(KERN_INFO "Your memory is not aligned; you need to "
                       "rebuild your kernel with a bigger NODEMAPSIZE, "
                       "shift=%d\n", shift);
                return -1;
        }
        return shift;
}

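/*
 * Consumer note: callers store the returned shift in memnode_shift, and
 * phys_to_nid() (asm/mmzone_64.h) then resolves a physical address with
 * what amounts to memnodemap[addr >> memnode_shift].
 */
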
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}

static void * __init early_node_mem(int nodeid, unsigned long start,
                                    unsigned long end, unsigned long size,
                                    unsigned long align)
{
        unsigned long mem;

        /*
         * Put the allocation as high as possible; NODE_DATA and similar
         * early data will go here, keeping the DMA/DMA32 ranges free.
         */
        if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
                start = MAX_DMA_PFN<<PAGE_SHIFT;
        if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
            end > (MAX_DMA32_PFN<<PAGE_SHIFT))
                start = MAX_DMA32_PFN<<PAGE_SHIFT;
        mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
        if (mem != MEMBLOCK_ERROR)
                return __va(mem);

        /* extend the search scope */
        end = max_pfn_mapped << PAGE_SHIFT;
        start = MAX_DMA_PFN << PAGE_SHIFT;
        mem = memblock_find_in_range(start, end, size, align);
        if (mem != MEMBLOCK_ERROR)
                return __va(mem);

        printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
               size, nodeid);

        return NULL;
}

/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
        unsigned long start_pfn, last_pfn, nodedata_phys;
        const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        int nid;

        if (!end)
                return;

        /*
         * Don't confuse VM with a node that doesn't have the
         * minimum amount of memory:
         */
        if (end && (end - start) < NODE_MIN_SIZE)
                return;

        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
               start, end);

        start_pfn = start >> PAGE_SHIFT;
        last_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
                                           SMP_CACHE_BYTES);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);
        memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
        printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
               nodedata_phys + pgdat_size - 1);
        nid = phys_to_nid(nodedata_phys);
        if (nid != nodeid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->node_id = nodeid;
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

        node_set_online(nodeid);
}

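/*
 * Usage sketch: setup_node_bootmem() above obtains its pg_data_t via
 * early_node_mem(), which first searches the node itself above the
 * DMA/DMA32 cut-offs and only then falls back to the whole mapped range,
 * so NODE_DATA may occasionally land on another node (hence the
 * "NODE_DATA(%d) on node %d" message).
 */
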
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

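/*
 * Round-robin example for numa_init_array() above: with nodes {0, 1} online
 * and CPUs 0..5 still unmapped, the loop assigns nodes 0,1,0,1,0,1.  CPUs
 * that already have a node from the firmware tables are left untouched.
 */
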
#ifdef CONFIG_NUMA_EMU
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __initdata;
static char *cmdline __initdata;

static int __init setup_physnodes(unsigned long start, unsigned long end,
                                  int acpi, int amd)
{
        int nr_nodes = 0;
        int ret = 0;
        int i;

        memset(physnodes, 0, sizeof(physnodes));
#ifdef CONFIG_ACPI_NUMA
        if (acpi)
                nr_nodes = acpi_get_nodes(physnodes);
#endif
#ifdef CONFIG_AMD_NUMA
        if (amd)
                nr_nodes = amd_get_nodes(physnodes);
#endif
        /*
         * Basic sanity checking on the physical node map: there may be errors
         * if the SRAT or AMD code incorrectly reported the topology or the mem=
         * kernel parameter is used.
         */
        for (i = 0; i < nr_nodes; i++) {
                if (physnodes[i].start == physnodes[i].end)
                        continue;
                if (physnodes[i].start > end) {
                        physnodes[i].end = physnodes[i].start;
                        continue;
                }
                if (physnodes[i].end < start) {
                        physnodes[i].start = physnodes[i].end;
                        continue;
                }
                if (physnodes[i].start < start)
                        physnodes[i].start = start;
                if (physnodes[i].end > end)
                        physnodes[i].end = end;
        }

        /*
         * Remove all nodes that have no memory or were truncated because of the
         * limited address range.
         */
        for (i = 0; i < nr_nodes; i++) {
                if (physnodes[i].start == physnodes[i].end)
                        continue;
                physnodes[ret].start = physnodes[i].start;
                physnodes[ret].end = physnodes[i].end;
                ret++;
        }

        /*
         * If no physical topology was detected, a single node is faked to cover
         * the entire address space.
         */
        if (!ret) {
                physnodes[ret].start = start;
                physnodes[ret].end = end;
                ret = 1;
        }
        return ret;
}

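/*
 * Clamping example for setup_physnodes() above (ranges assumed for
 * illustration): with mem=4G, a physical node reported as 5G-8G starts past
 * 'end', is collapsed to an empty range and dropped by the compaction pass,
 * while a node reported as 3G-8G is simply truncated to 3G-4G.
 */
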
/*
 * Sets up nid to range from addr to addr + size.  If the end boundary is
 * greater than max_addr, then max_addr is used instead.  The return value is 0
 * if there is additional memory left for allocation past addr and -1 otherwise.
 * addr is adjusted to be at the end of the node.
 */
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
        int ret = 0;

        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
                *addr = max_addr;
                ret = -1;
        }
        nodes[nid].end = *addr;
        node_set(nid, node_possible_map);
        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               nodes[nid].start, nodes[nid].end,
               (nodes[nid].end - nodes[nid].start) >> 20);
        return ret;
}

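/*
 * Return-value example for setup_node_range() above: if *addr + size
 * reaches max_addr (say a 512MB request when only 300MB remains below
 * max_addr), the fake node is clipped at max_addr and -1 tells the caller
 * that this range is exhausted.
 */
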
/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
 * to max_addr.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(u64 addr, u64 max_addr,
                                         int nr_phys_nodes, int nr_nodes)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 size;
        int big;
        int ret = 0;
        int i;

        if (nr_nodes <= 0)
                return -1;
        if (nr_nodes > MAX_NUMNODES) {
                pr_info("numa=fake=%d too large, reducing to %d\n",
                        nr_nodes, MAX_NUMNODES);
                nr_nodes = MAX_NUMNODES;
        }

        size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the remainder.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
                FAKE_NODE_MIN_SIZE;

        size &= FAKE_NODE_MIN_HASH_MASK;
        if (!size) {
                pr_err("Not enough memory for each node.  "
                       "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = 0; i < nr_phys_nodes; i++)
                if (physnodes[i].start != physnodes[i].end)
                        node_set(i, physnode_mask);

        /*
         * Continue to fill physical nodes with fake nodes until there is no
         * memory left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 end = physnodes[i].start + size;
                        u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

                        if (ret < big)
                                end += FAKE_NODE_MIN_SIZE;

                        /*
                         * Continue to add memory to this fake node if its
                         * non-reserved memory is less than the per-node size.
                         */
                        while (end - physnodes[i].start -
                               memblock_x86_hole_size(physnodes[i].start, end) < size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > physnodes[i].end) {
                                        end = physnodes[i].end;
                                        break;
                                }
                        }

                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (physnodes[i].end - end -
                            memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;

                        /*
                         * Avoid allocating more nodes than requested, which can
                         * happen as a result of rounding down each node's size
                         * to FAKE_NODE_MIN_SIZE.
                         */
                        if (nodes_weight(physnode_mask) + ret >= nr_nodes)
                                end = physnodes[i].end;

                        if (setup_node_range(ret++, &physnodes[i].start,
                                             end - physnodes[i].start,
                                             physnodes[i].end) < 0)
                                node_clear(i, physnode_mask);
                }
        }
        return ret;
}

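/*
 * Worked example for split_nodes_interleave() above (sizes assumed, no
 * memory holes): numa=fake=4 on two 8GB physical nodes gives size = 4GB per
 * fake node; the loop alternates between the physical nodes, carving two
 * fake nodes out of each, so every fake node still lies within a single
 * physical node.
 */
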
/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
        u64 end = start + size;

        while (end - start - memblock_x86_hole_size(start, end) < size) {
                end += FAKE_NODE_MIN_SIZE;
                if (end > max_addr) {
                        end = max_addr;
                        break;
                }
        }
        return end;
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 min_size;
        int ret = 0;
        int i;

        if (!size)
                return -1;
        /*
         * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
         * increased accordingly if the requested size is too small.  This
         * creates a uniform distribution of node sizes across the entire
         * machine (but not necessarily over physical nodes).
         */
        min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
                                                        MAX_NUMNODES;
        min_size = max(min_size, FAKE_NODE_MIN_SIZE);
        if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
                min_size = (min_size + FAKE_NODE_MIN_SIZE) &
                                                FAKE_NODE_MIN_HASH_MASK;
        if (size < min_size) {
                pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
                       size >> 20, min_size >> 20);
                size = min_size;
        }
        size &= FAKE_NODE_MIN_HASH_MASK;

        for (i = 0; i < MAX_NUMNODES; i++)
                if (physnodes[i].start != physnodes[i].end)
                        node_set(i, physnode_mask);
        /*
         * Fill physical nodes with fake nodes of size until there is no memory
         * left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
                        u64 end;

                        end = find_end_of_node(physnodes[i].start,
                                               physnodes[i].end, size);
                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (physnodes[i].end - end -
                            memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;

                        /*
                         * Setup the fake node that will be allocated as bootmem
                         * later.  If setup_node_range() returns non-zero, there
                         * is no more memory available on this physical node.
                         */
                        if (setup_node_range(ret++, &physnodes[i].start,
                                             end - physnodes[i].start,
                                             physnodes[i].end) < 0)
                                node_clear(i, physnode_mask);
                }
        }
        return ret;
}

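/*
 * Worked example for split_nodes_size_interleave() above (sizes assumed):
 * numa=fake=512M on a 16GB box yields roughly 32 fake nodes, interleaved
 * over the physical nodes as above; a requested size below the computed
 * minimum is bumped up and reported via pr_err().
 */
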
/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn,
                        unsigned long last_pfn, int acpi, int amd)
{
        u64 addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = last_pfn << PAGE_SHIFT;
        int num_phys_nodes;
        int num_nodes;
        int i;

        num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
        /*
         * If the numa=fake command-line contains a 'M' or 'G', it represents
         * the fixed node size.  Otherwise, if it is just a single number N,
         * split the system RAM into N fake nodes.
         */
        if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
                u64 size;

                size = memparse(cmdline, &cmdline);
                num_nodes = split_nodes_size_interleave(addr, max_addr, size);
        } else {
                unsigned long n;

                n = simple_strtoul(cmdline, NULL, 0);
                num_nodes = split_nodes_interleave(addr, max_addr, num_phys_nodes, n);
        }

        if (num_nodes < 0)
                return num_nodes;
        memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
                       "disabled.\n");
                return -1;
        }

        /*
         * We need to vacate all active ranges that may have been registered for
         * the e820 memory map.
         */
        remove_all_active_ranges();
        for_each_node_mask(i, node_possible_map) {
                memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                                nodes[i].end >> PAGE_SHIFT);
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        }
        acpi_fake_nodes(nodes, num_nodes);
        numa_init_array();
        return 0;
}
#endif /* CONFIG_NUMA_EMU */

void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
                         int acpi, int amd)
{
        int i;

        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);

#ifdef CONFIG_NUMA_EMU
        if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_ACPI_NUMA
        if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
                                                  last_pfn << PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_AMD_NUMA
        if (!numa_off && amd && !amd_scan_nodes())
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");

        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               start_pfn << PAGE_SHIFT,
               last_pfn << PAGE_SHIFT);
        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap = memnode.embedded_map;
        memnodemap[0] = 0;
        node_set_online(0);
        node_set(0, node_possible_map);
        for (i = 0; i < nr_cpu_ids; i++)
                numa_set_node(i, 0);
        memblock_x86_register_active_regions(0, start_pfn, last_pfn);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
}

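/*
 * Fallback order in initmem_init() above: NUMA emulation (numa=fake=...) is
 * tried first, then the ACPI SRAT scan, then the AMD northbridge scan.  If
 * all of them fail, or numa=off was given, a single dummy node covering all
 * of memory is set up with memnode_shift = 63 so every address hashes to
 * node 0.
 */
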
unsigned long __init numa_free_all_bootmem(void)
{
        unsigned long pages = 0;
        int i;

        for_each_online_node(i)
                pages += free_all_bootmem_node(NODE_DATA(i));

        pages += free_all_memory_core_early(MAX_NUMNODES);

        return pages;
}

static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 0;
}
early_param("numa", numa_setup);

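/*
 * Boot-parameter examples for numa_setup() above: "numa=off" disables NUMA,
 * "numa=fake=8" or "numa=fake=512M" selects emulation (CONFIG_NUMA_EMU),
 * and "numa=noacpi" ignores the ACPI SRAT (CONFIG_ACPI_NUMA).
 */
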
static __init int find_near_online_node(int node)
{
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;

        for_each_online_node(n) {
                val = node_distance(node, n);

                if (val < min_val) {
                        min_val = val;
                        best_node = n;
                }
        }

        return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[]
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node;
                u16 apicid = cpu_to_apicid[cpu];

                if (apicid == BAD_APICID)
                        continue;
                node = apicid_to_node[apicid];
                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        node = find_near_online_node(node);
                numa_set_node(cpu, node);
        }
}

void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = early_cpu_to_node(cpu);
        struct cpumask *mask;
        char buf[64];

        mask = node_to_cpumask_map[node];
        if (mask == NULL) {
                printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}

int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                       "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */