/*
 * ACPI 3.0 based NUMA setup
 * Copyright 2004 Andi Kleen, SuSE Labs.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
int acpi_numa __initdata;

static struct acpi_table_slit *acpi_slit;

static nodemask_t nodes_parsed __initdata;
static nodemask_t cpu_nodes_parsed __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES];

static int num_node_memblks __initdata;
static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata;
static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata;
static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}
static __init int conflicting_memblks(unsigned long start, unsigned long end)
{
	int i;

	for (i = 0; i < num_node_memblks; i++) {
		struct bootnode *nd = &node_memblk_range[i];

		if (nd->start == nd->end)
			continue;
		if (nd->end > start && nd->start < end)
			return memblk_nodeid[i];
		if (nd->end == end && nd->start == start)
			return memblk_nodeid[i];
	}
	return -1;
}
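/*
 * Worked example for the overlap test above (addresses assumed):
 * memblks are half-open ranges [start, end), so
 *
 *	[0x00000000, 0x40000000) vs [0x20000000, 0x60000000) -> conflict
 *	[0x00000000, 0x40000000) vs [0x40000000, 0x80000000) -> touch only, no conflict
 *
 * because a conflict requires nd->end > start && nd->start < end.
 */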
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
	struct bootnode *nd = &nodes[i];

	if (nd->start < start) {
		nd->start = start;
		if (nd->end < nd->start)
			nd->start = nd->end;
	}
	if (nd->end > end) {
		nd->end = end;
		if (nd->start > nd->end)
			nd->start = nd->end;
	}
}
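/*
 * Worked example (addresses assumed): clipping node [0x40000000,
 * 0x140000000) against a scan window of [0, 0x100000000) yields
 * [0x40000000, 0x100000000); a node lying entirely outside the window
 * collapses to an empty range with start == end.
 */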
static __init void bad_srat(void)
{
	int i;

	printk(KERN_ERR "SRAT: SRAT not used.\n");
	acpi_numa = -1;
	for (i = 0; i < MAX_LOCAL_APIC; i++)
		apicid_to_node[i] = NUMA_NO_NODE;
	for (i = 0; i < MAX_NUMNODES; i++) {
		nodes[i].start = nodes[i].end = 0;
		nodes_add[i].start = nodes_add[i].end = 0;
	}
	remove_all_active_ranges();
}
static __init inline int srat_disabled(void)
{
	return numa_off || acpi_numa < 0;
}
/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
	unsigned length;
	unsigned long phys;

	length = slit->header.length;
	phys = memblock_find_in_range(0, max_pfn_mapped << PAGE_SHIFT, length,
		 PAGE_SIZE);

	if (phys == MEMBLOCK_ERROR)
		panic("Cannot save SLIT!\n");

	acpi_slit = __va(phys);
	memcpy(acpi_slit, slit, length);
	memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT");
}
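/*
 * SLIT layout sketch (values assumed): the table body is a flattened
 * locality_count x locality_count byte matrix indexed by PXM, so for
 * a two-domain system
 *
 *	locality_count = 2, entry[] = { 10, 20, 20, 10 }
 *
 * the distance from PXM 0 to PXM 1 is entry[0 * 2 + 1] == 20 and each
 * self-distance is the local value 10.
 */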
/* Callback for Proximity Domain -> x2APIC mapping */
void __init
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	int pxm, node;
	int apic_id;

	if (srat_disabled())
		return;
	if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	apic_id = pa->apic_id;
	apicid_to_node[apic_id] = node;
	node_set(node, cpu_nodes_parsed);
	acpi_numa = 1;
	printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
	       pxm, apic_id, node);
}
/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;
	int apic_id;

	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	if (get_uv_system_type() >= UV_X2APIC)
		apic_id = (pa->apic_id << 8) | pa->local_sapic_eid;
	else
		apic_id = pa->apic_id;
	apicid_to_node[apic_id] = node;
	node_set(node, cpu_nodes_parsed);
	acpi_numa = 1;
	printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
	       pxm, apic_id, node);
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static inline int save_add_info(void) { return 1; }
#else
static inline int save_add_info(void) { return 0; }
#endif
/*
 * Update nodes_add[].
 * This code supports one contiguous hot add area per node.
 */
static void __init
update_nodes_add(int node, unsigned long start, unsigned long end)
{
	unsigned long s_pfn = start >> PAGE_SHIFT;
	unsigned long e_pfn = end >> PAGE_SHIFT;
	int changed = 0;
	struct bootnode *nd = &nodes_add[node];

	/* I had some trouble with strange memory hotadd regions breaking
	   the boot. Be very strict here and reject anything unexpected.
	   If you want working memory hotadd, write correct SRATs.

	   The node size check is a basic sanity check to guard against
	   mistakes. */
	if ((signed long)(end - start) < NODE_MIN_SIZE) {
		printk(KERN_ERR "SRAT: Hotplug area too small\n");
		return;
	}

	/* This check might be a bit too strict, but I'm keeping it for now. */
	if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
		printk(KERN_ERR
			"SRAT: Hotplug area %lu -> %lu has existing memory\n",
			s_pfn, e_pfn);
		return;
	}

	/* Looks good */

	if (nd->start == nd->end) {
		nd->start = start;
		nd->end = end;
		changed = 1;
	} else {
		if (nd->start == end) {
			nd->start = start;
			changed = 1;
		}
		if (nd->end == start) {
			nd->end = end;
			changed = 1;
		}
		if (!changed)
			printk(KERN_ERR "SRAT: Hotplug zone not contiguous. Partly ignored\n");
	}

	if (changed) {
		node_set(node, cpu_nodes_parsed);
		printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
				 nd->start, nd->end);
	}
}
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
	struct bootnode *nd, oldnode;
	unsigned long start, end;
	int node, pxm;
	int i;

	if (srat_disabled())
		return;
	if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
		bad_srat();
		return;
	}
	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		return;

	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
		return;
	start = ma->base_address;
	end = start + ma->length;
	pxm = ma->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
		bad_srat();
		return;
	}
	i = conflicting_memblks(start, end);
	if (i == node) {
		printk(KERN_WARNING
		"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
			pxm, start, end, nodes[i].start, nodes[i].end);
	} else if (i >= 0) {
		printk(KERN_ERR
		       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
		       pxm, start, end, node_to_pxm(i),
		       nodes[i].start, nodes[i].end);
		bad_srat();
		return;
	}
	nd = &nodes[node];
	oldnode = *nd;
	if (!node_test_and_set(node, nodes_parsed)) {
		nd->start = start;
		nd->end = end;
	} else {
		if (start < nd->start)
			nd->start = start;
		if (nd->end < end)
			nd->end = end;
	}

	printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
	       start, end);

	if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
		update_nodes_add(node, start, end);
		/* restore nodes[node] */
		*nd = oldnode;
		if ((nd->start | nd->end) == 0)
			node_clear(node, nodes_parsed);
	}

	node_memblk_range[num_node_memblks].start = start;
	node_memblk_range[num_node_memblks].end = end;
	memblk_nodeid[num_node_memblks] = node;
	num_node_memblks++;
}
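/*
 * Worked example (addresses assumed): if PXM 0 supplies memblks
 * [0x0, 0x80000000) and [0x100000000, 0x180000000), nodes[0] grows to
 * span [0x0, 0x180000000); the hole in between stays inside the node,
 * matching the single-chunk assumption in the header comment.
 */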
/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory. */
static int __init nodes_cover_memory(const struct bootnode *nodes)
{
	int i;
	unsigned long pxmram, e820ram;

	pxmram = 0;
	for_each_node_mask(i, nodes_parsed) {
		unsigned long s = nodes[i].start >> PAGE_SHIFT;
		unsigned long e = nodes[i].end >> PAGE_SHIFT;
		pxmram += e - s;
		pxmram -= __absent_pages_in_range(i, s, e);
		if ((long)pxmram < 0)
			pxmram = 0;
	}

	e820ram = max_pfn - (memblock_x86_hole_size(0,
				max_pfn << PAGE_SHIFT) >> PAGE_SHIFT);
	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((long)(e820ram - pxmram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR
	"SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
			(pxmram << PAGE_SHIFT) >> 20,
			(e820ram << PAGE_SHIFT) >> 20);
		return 0;
	}
	return 1;
}
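/*
 * Worked example for the slack above: with 4K pages PAGE_SHIFT is 12,
 * so 1 << (20 - 12) is 256 pages, exactly 1MB. A box whose e820 map
 * reports 4096MB while the parsed PXMs cover only 2048MB fails the
 * check and NUMA setup falls back to ignoring the SRAT.
 */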
void __init acpi_numa_arch_fixup(void) {}
int __init acpi_get_nodes(struct bootnode *physnodes)
{
	int i;
	int ret = 0;

	for_each_node_mask(i, nodes_parsed) {
		physnodes[ret].start = nodes[i].start;
		physnodes[ret].end = nodes[i].end;
		ret++;
	}
	return ret;
}
/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
	int i;

	if (acpi_numa <= 0)
		return -1;

	/* First clean up the node list */
	for (i = 0; i < MAX_NUMNODES; i++)
		cutoff_node(i, start, end);

	/*
	 * Join together blocks on the same node, holes between
	 * which don't overlap with memory on other nodes.
	 */
	for (i = 0; i < num_node_memblks; ++i) {
		int j, k;

		for (j = i + 1; j < num_node_memblks; ++j) {
			unsigned long start, end;

			if (memblk_nodeid[i] != memblk_nodeid[j])
				continue;
			start = min(node_memblk_range[i].end,
				    node_memblk_range[j].end);
			end = max(node_memblk_range[i].start,
				  node_memblk_range[j].start);
			for (k = 0; k < num_node_memblks; ++k) {
				if (memblk_nodeid[i] == memblk_nodeid[k])
					continue;
				if (start < node_memblk_range[k].end &&
				    end > node_memblk_range[k].start)
					break;
			}
			if (k < num_node_memblks)
				continue;
			start = min(node_memblk_range[i].start,
				    node_memblk_range[j].start);
			end = max(node_memblk_range[i].end,
				  node_memblk_range[j].end);
			printk(KERN_INFO "SRAT: Node %d "
			       "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
			       memblk_nodeid[i],
			       node_memblk_range[i].start,
			       node_memblk_range[i].end,
			       node_memblk_range[j].start,
			       node_memblk_range[j].end,
			       start, end);
			node_memblk_range[i].start = start;
			node_memblk_range[i].end = end;
			k = --num_node_memblks - j;
			memmove(memblk_nodeid + j, memblk_nodeid + j+1,
				k * sizeof(*memblk_nodeid));
			memmove(node_memblk_range + j, node_memblk_range + j+1,
				k * sizeof(*node_memblk_range));
			--j;
		}
	}

	memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
					   memblk_nodeid);
	if (memnode_shift < 0) {
		printk(KERN_ERR
		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
		bad_srat();
		return -1;
	}

	for (i = 0; i < num_node_memblks; i++)
		memblock_x86_register_active_regions(memblk_nodeid[i],
				node_memblk_range[i].start >> PAGE_SHIFT,
				node_memblk_range[i].end >> PAGE_SHIFT);

	/* for out of order entries in SRAT */
	sort_node_map();
	if (!nodes_cover_memory(nodes)) {
		bad_srat();
		return -1;
	}

	/* Account for nodes with cpus and no memory */
	nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);

	/* Finally register nodes */
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	/* Try again in case setup_node_bootmem missed one due
	   to missing bootmem */
	for_each_node_mask(i, node_possible_map)
		if (!node_online(i))
			setup_node_bootmem(i, nodes[i].start, nodes[i].end);

	for (i = 0; i < nr_cpu_ids; i++) {
		int node = early_cpu_to_node(i);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			numa_clear_node(i);
	}
	numa_init_array();
	return 0;
}
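/*
 * Worked example of the join above (addresses assumed): node 0 memblks
 * [0x0, 0x80000000) and [0xc0000000, 0x100000000) merge into
 * [0x0, 0x100000000) only if no other node owns a memblk intersecting
 * the gap [0x80000000, 0xc0000000); otherwise they stay split so the
 * hole is not misattributed to node 0.
 */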
#ifdef CONFIG_NUMA_EMU
static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
	[0 ... MAX_NUMNODES-1] = PXM_INVAL
};
static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
static int __init find_node_by_addr(unsigned long addr)
{
	int ret = NUMA_NO_NODE;
	int i;

	for_each_node_mask(i, nodes_parsed) {
		/*
		 * Find the real node that this emulated node appears on. For
		 * the sake of simplicity, we only use a real node's starting
		 * address to determine which emulated node it appears on.
		 */
		if (addr >= nodes[i].start && addr < nodes[i].end) {
			ret = i;
			break;
		}
	}
	return ret;
}
/*
 * In NUMA emulation, we need to set up proximity domain (_PXM) to node ID
 * mappings that respect the real ACPI topology but reflect our emulated
 * environment. For each emulated node, we find which real node it appears on
 * and create PXM to NID mappings for those fake nodes which mirror that
 * locality. SLIT will now represent the correct distances between emulated
 * nodes as a result of the real topology.
 */
void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
{
	int i, j;

	printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
			 "topology.\n");
	for (i = 0; i < num_nodes; i++) {
		int nid, pxm;

		nid = find_node_by_addr(fake_nodes[i].start);
		if (nid == NUMA_NO_NODE)
			continue;
		pxm = node_to_pxm(nid);
		if (pxm == PXM_INVAL)
			continue;
		fake_node_to_pxm_map[i] = pxm;
		/*
		 * For each apicid_to_node mapping that exists for this real
		 * node, it must now point to the fake node ID.
		 */
		for (j = 0; j < MAX_LOCAL_APIC; j++)
			if (apicid_to_node[j] == nid &&
			    fake_apicid_to_node[j] == NUMA_NO_NODE)
				fake_apicid_to_node[j] = i;
	}
	for (i = 0; i < num_nodes; i++)
		__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));

	nodes_clear(nodes_parsed);
	for (i = 0; i < num_nodes; i++)
		if (fake_nodes[i].start != fake_nodes[i].end)
			node_set(i, nodes_parsed);
}
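/*
 * Worked example (a hypothetical split): emulating four nodes on two
 * real nodes, fake nodes 0 and 1 carved out of real node 0 both map
 * to real node 0's PXM and fake nodes 2 and 3 to real node 1's PXM,
 * so SLIT lookups between fake nodes 0 and 1 return the local
 * distance while fake nodes 0 and 2 report the real inter-node
 * distance.
 */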
static int null_slit_node_compare(int a, int b)
{
	return node_to_pxm(a) == node_to_pxm(b);
}
#else
static int null_slit_node_compare(int a, int b)
{
	return a == b;
}
#endif /* CONFIG_NUMA_EMU */
int __node_distance(int a, int b)
{
	int index;

	if (!acpi_slit)
		return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
						      REMOTE_DISTANCE;
	index = acpi_slit->locality_count * node_to_pxm(a);
	return acpi_slit->entry[index + node_to_pxm(b)];
}

EXPORT_SYMBOL(__node_distance);
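/*
 * Worked example (values assumed):
 *
 *	acpi_slit->locality_count = 2;
 *	acpi_slit->entry[] = { 10, 20, 20, 10 };
 *	__node_distance(0, 1) -> entry[2 * 0 + 1] == 20
 *
 * Without a SLIT, the fallback returns LOCAL_DISTANCE only when both
 * nodes compare equal (or, under emulation, share a PXM).
 */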
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY)
int memory_add_physaddr_to_nid(u64 start)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++)
		if (nodes_add[i].start <= start && nodes_add[i].end > start)
			ret = i;

	return ret;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
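/*
 * Worked example (addresses assumed): if nodes_add[1] records the hot
 * add area [0x100000000, 0x180000000), a DIMM hot-added at physical
 * address 0x140000000 resolves to node 1; an address outside every
 * recorded area falls back to node 0.
 */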