arch/x86/mm/numa_64.c
/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

struct memnode memnode;

s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_off __initdata;
static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Given a shift value, try to populate memnodemap[]
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
                                      int numnodes, int shift, int *nodeids)
{
        unsigned long addr, end;
        int i, res = -1;

        memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != NUMA_NO_NODE)
                                return -1;

                        if (!nodeids)
                                memnodemap[addr >> shift] = i;
                        else
                                memnodemap[addr >> shift] = nodeids[i];

                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}

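/*
 * Allocate the physical-address-to-node hash table.  The small table
 * embedded in struct memnode is used when it is big enough; otherwise a
 * cache-aligned table is carved out of memblock below max_pfn.
 */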
static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
                return 0;

        addr = 0x8000;
        nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
        nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT,
                                              nodemap_size, L1_CACHE_BYTES);
        if (nodemap_addr == MEMBLOCK_ERROR) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        memnodemap = phys_to_virt(nodemap_addr);
        memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
                                         int numnodes)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < numnodes; i++) {
                start = nodes[i].start;
                end = nodes[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = 63;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i) + 1;
        return i;
}

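/*
 * Compute the memnode hash shift from the boot-time node map, allocate the
 * lookup table and populate it.  Returns the shift on success or -1 on
 * failure.
 */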
int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
                              int *nodeids)
{
        int shift;

        shift = extract_lsb_from_nodes(nodes, numnodes);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
                shift);

        if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
                printk(KERN_INFO "Your memory is not aligned you need to "
                       "rebuild your kernel with a bigger NODEMAPSIZE "
                       "shift=%d\n", shift);
                return -1;
        }
        return shift;
}

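/* Resolve a pfn to its node id via the memnode hash table. */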
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}

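/*
 * Early boot allocation of `size' bytes for node `nodeid': prefer the
 * node's own [start, end) range above the DMA zones, and fall back to any
 * mapped memory above ZONE_DMA if that fails.
 */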
static void * __init early_node_mem(int nodeid, unsigned long start,
                                    unsigned long end, unsigned long size,
                                    unsigned long align)
{
        unsigned long mem;

        /*
         * Put it as high as possible: whatever we allocate here will go
         * with NODE_DATA.
         */
        if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
                start = MAX_DMA_PFN<<PAGE_SHIFT;
        if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
            end > (MAX_DMA32_PFN<<PAGE_SHIFT))
                start = MAX_DMA32_PFN<<PAGE_SHIFT;
        mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
        if (mem != MEMBLOCK_ERROR)
                return __va(mem);

        /* extend the search scope */
        end = max_pfn_mapped << PAGE_SHIFT;
        start = MAX_DMA_PFN << PAGE_SHIFT;
        mem = memblock_find_in_range(start, end, size, align);
        if (mem != MEMBLOCK_ERROR)
                return __va(mem);

        printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
               size, nodeid);

        return NULL;
}

/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
        unsigned long start_pfn, last_pfn, nodedata_phys;
        const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        int nid;

        if (!end)
                return;

        /*
         * Don't confuse VM with a node that doesn't have the
         * minimum amount of memory:
         */
        if (end && (end - start) < NODE_MIN_SIZE)
                return;

        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
               start, end);

        start_pfn = start >> PAGE_SHIFT;
        last_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
                                           SMP_CACHE_BYTES);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);
        memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
        printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
                nodedata_phys + pgdat_size - 1);
        nid = phys_to_nid(nodedata_phys);
        if (nid != nodeid)
                printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->node_id = nodeid;
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

        node_set_online(nodeid);
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __initdata;
static char *cmdline __initdata;

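/*
 * Build the table of physical nodes (from ACPI/SRAT or the AMD northbridge
 * code), clamp every entry to [start, end), drop empty or fully truncated
 * entries, and fall back to a single node covering the whole range if no
 * topology was detected.  Returns the number of physical nodes.
 */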
static int __init setup_physnodes(unsigned long start, unsigned long end,
                                  int acpi, int amd)
{
        int nr_nodes = 0;
        int ret = 0;
        int i;

#ifdef CONFIG_ACPI_NUMA
        if (acpi)
                nr_nodes = acpi_get_nodes(physnodes);
#endif
#ifdef CONFIG_AMD_NUMA
        if (amd)
                nr_nodes = amd_get_nodes(physnodes);
#endif
        /*
         * Basic sanity checking on the physical node map: there may be errors
         * if the SRAT or AMD code incorrectly reported the topology or the
         * mem= kernel parameter is used.
         */
        for (i = 0; i < nr_nodes; i++) {
                if (physnodes[i].start == physnodes[i].end)
                        continue;
                if (physnodes[i].start > end) {
                        physnodes[i].end = physnodes[i].start;
                        continue;
                }
                if (physnodes[i].end < start) {
                        physnodes[i].start = physnodes[i].end;
                        continue;
                }
                if (physnodes[i].start < start)
                        physnodes[i].start = start;
                if (physnodes[i].end > end)
                        physnodes[i].end = end;
        }

        /*
         * Remove all nodes that have no memory or were truncated because of
         * the limited address range.
         */
        for (i = 0; i < nr_nodes; i++) {
                if (physnodes[i].start == physnodes[i].end)
                        continue;
                physnodes[ret].start = physnodes[i].start;
                physnodes[ret].end = physnodes[i].end;
                ret++;
        }

        /*
         * If no physical topology was detected, a single node is faked to
         * cover the entire address space.
         */
        if (!ret) {
                physnodes[ret].start = start;
                physnodes[ret].end = end;
                ret = 1;
        }
        return ret;
}

/*
 * Sets up nid to range from addr to addr + size.  If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise.  addr is adjusted to be at
 * the end of the node.
 */
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
        int ret = 0;
        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
                *addr = max_addr;
                ret = -1;
        }
        nodes[nid].end = *addr;
        node_set(nid, node_possible_map);
        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               nodes[nid].start, nodes[nid].end,
               (nodes[nid].end - nodes[nid].start) >> 20);
        return ret;
}

/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from
 * addr to max_addr.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(u64 addr, u64 max_addr,
                                         int nr_phys_nodes, int nr_nodes)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 size;
        int big;
        int ret = 0;
        int i;

        if (nr_nodes <= 0)
                return -1;
        if (nr_nodes > MAX_NUMNODES) {
                pr_info("numa=fake=%d too large, reducing to %d\n",
                        nr_nodes, MAX_NUMNODES);
                nr_nodes = MAX_NUMNODES;
        }

        size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the remainder.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
                FAKE_NODE_MIN_SIZE;

        size &= FAKE_NODE_MIN_HASH_MASK;
        if (!size) {
                pr_err("Not enough memory for each node. "
                        "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = 0; i < nr_phys_nodes; i++)
                if (physnodes[i].start != physnodes[i].end)
                        node_set(i, physnode_mask);

        /*
         * Continue to fill physical nodes with fake nodes until there is no
         * memory left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 end = physnodes[i].start + size;
                        u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

                        if (ret < big)
                                end += FAKE_NODE_MIN_SIZE;

                        /*
                         * Continue to add memory to this fake node if its
                         * non-reserved memory is less than the per-node size.
                         */
                        while (end - physnodes[i].start -
                                memblock_x86_hole_size(physnodes[i].start, end) < size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > physnodes[i].end) {
                                        end = physnodes[i].end;
                                        break;
                                }
                        }

                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (physnodes[i].end - end -
                            memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;

                        /*
                         * Avoid allocating more nodes than requested, which can
                         * happen as a result of rounding down each node's size
                         * to FAKE_NODE_MIN_SIZE.
                         */
                        if (nodes_weight(physnode_mask) + ret >= nr_nodes)
                                end = physnodes[i].end;

                        if (setup_node_range(ret++, &physnodes[i].start,
                                                end - physnodes[i].start,
                                                physnodes[i].end) < 0)
                                node_clear(i, physnode_mask);
                }
        }
        return ret;
}

/*
 * Returns the end address of a node so that there is at least `size' amount
 * of non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
        u64 end = start + size;

        while (end - start - memblock_x86_hole_size(start, end) < size) {
                end += FAKE_NODE_MIN_SIZE;
                if (end > max_addr) {
                        end = max_addr;
                        break;
                }
        }
        return end;
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 min_size;
        int ret = 0;
        int i;

        if (!size)
                return -1;
        /*
         * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
         * increased accordingly if the requested size is too small.  This
         * creates a uniform distribution of node sizes across the entire
         * machine (but not necessarily over physical nodes).
         */
        min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
                                                MAX_NUMNODES;
        min_size = max(min_size, FAKE_NODE_MIN_SIZE);
        if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
                min_size = (min_size + FAKE_NODE_MIN_SIZE) &
                                                FAKE_NODE_MIN_HASH_MASK;
        if (size < min_size) {
                pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
                        size >> 20, min_size >> 20);
                size = min_size;
        }
        size &= FAKE_NODE_MIN_HASH_MASK;

        for (i = 0; i < MAX_NUMNODES; i++)
                if (physnodes[i].start != physnodes[i].end)
                        node_set(i, physnode_mask);
        /*
         * Fill physical nodes with fake nodes of size until there is no memory
         * left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
                        u64 end;

                        end = find_end_of_node(physnodes[i].start,
                                                physnodes[i].end, size);
                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (physnodes[i].end - end -
                            memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;

                        /*
                         * Setup the fake node that will be allocated as bootmem
                         * later.  If setup_node_range() returns non-zero, there
                         * is no more memory available on this physical node.
                         */
                        if (setup_node_range(ret++, &physnodes[i].start,
                                                end - physnodes[i].start,
                                                physnodes[i].end) < 0)
                                node_clear(i, physnode_mask);
                }
        }
        return ret;
}

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn,
                        unsigned long last_pfn, int acpi, int amd)
{
        u64 addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = last_pfn << PAGE_SHIFT;
        int num_phys_nodes;
        int num_nodes;
        int i;

        num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
        /*
         * If the numa=fake command-line contains a 'M' or 'G', it represents
         * the fixed node size.  Otherwise, if it is just a single number N,
         * split the system RAM into N fake nodes.
         */
        if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
                u64 size;

                size = memparse(cmdline, &cmdline);
                num_nodes = split_nodes_size_interleave(addr, max_addr, size);
        } else {
                unsigned long n;

                n = simple_strtoul(cmdline, NULL, 0);
                num_nodes = split_nodes_interleave(addr, max_addr, num_phys_nodes, n);
        }

        if (num_nodes < 0)
                return num_nodes;
        memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
                       "disabled.\n");
                return -1;
        }

        /*
         * We need to vacate all active ranges that may have been registered
         * for the e820 memory map.
         */
        remove_all_active_ranges();
        for_each_node_mask(i, node_possible_map) {
                memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                                nodes[i].end >> PAGE_SHIFT);
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        }
        acpi_fake_nodes(nodes, num_nodes);
        numa_init_array();
        return 0;
}
#endif /* CONFIG_NUMA_EMU */

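/*
 * Top-level NUMA initialization: try NUMA emulation, then the ACPI/SRAT
 * scan, then the AMD northbridge scan, and fall back to a single dummy
 * node covering all memory if none of them succeeds.
 */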
void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
                                int acpi, int amd)
{
        int i;

        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);

#ifdef CONFIG_NUMA_EMU
        if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_ACPI_NUMA
        if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
                                                  last_pfn << PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_AMD_NUMA
        if (!numa_off && amd && !amd_scan_nodes())
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");

        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               start_pfn << PAGE_SHIFT,
               last_pfn << PAGE_SHIFT);
        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap = memnode.embedded_map;
        memnodemap[0] = 0;
        node_set_online(0);
        node_set(0, node_possible_map);
        for (i = 0; i < nr_cpu_ids; i++)
                numa_set_node(i, 0);
        memblock_x86_register_active_regions(0, start_pfn, last_pfn);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
}

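/*
 * Release bootmem for every online node and return the total number of
 * pages handed back to the page allocator.
 */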
unsigned long __init numa_free_all_bootmem(void)
{
        unsigned long pages = 0;
        int i;

        for_each_online_node(i)
                pages += free_all_bootmem_node(NODE_DATA(i));

        pages += free_all_memory_core_early(MAX_NUMNODES);

        return pages;
}

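/* Parse the early "numa=" kernel parameter (off, fake=, noacpi). */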
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 0;
}
early_param("numa", numa_setup);

#ifdef CONFIG_NUMA

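/* Find the online node with the smallest node_distance() from `node'. */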
static __init int find_near_online_node(int node)
{
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;

        for_each_online_node(n) {
                val = node_distance(node, n);

                if (val < min_val) {
                        min_val = val;
                        best_node = n;
                }
        }

        return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node;
                u16 apicid = cpu_to_apicid[cpu];

                if (apicid == BAD_APICID)
                        continue;
                node = apicid_to_node[apicid];
                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        node = find_near_online_node(node);
                numa_set_node(cpu, node);
        }
}
#endif

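/*
 * Record the node for a cpu, either in the early map (before the per-cpu
 * areas exist) or in the per-cpu x86_cpu_to_node_map.
 */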
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

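/*
 * Plain versions of numa_add_cpu()/numa_remove_cpu(): simply flip the cpu's
 * bit in its node's cpumask.  The debug variants further below add
 * validation and logging.
 */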
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = early_cpu_to_node(cpu);
        struct cpumask *mask;
        char buf[64];

        mask = node_to_cpumask_map[node];
        if (mask == NULL) {
                printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}

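/*
 * Debug version of cpu_to_node(): warn if it is called before the per-cpu
 * areas are set up and fall back to the early map in that case.
 */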
int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */