/*
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/lmb.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>

static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

/* Poison value used for debug builds; assumes a DEBUG_NUMA build switch. */
#ifdef DEBUG_NUMA
#define ARRAY_INITIALISER -1
#else
#define ARRAY_INITIALISER 0
#endif

int numa_cpu_lookup_table[NR_CPUS] = { [0 ... (NR_CPUS - 1)] =
	ARRAY_INITIALISER};
char *numa_memory_lookup_table;
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
int nr_cpus_in_node[MAX_NUMNODES] = { [0 ... (MAX_NUMNODES - 1)] = 0};

struct pglist_data *node_data[MAX_NUMNODES];
bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;

/*
 * We need somewhere to store start/span for each node until we have
 * allocated the real node_data structures.
 */
static struct {
	unsigned long node_start_pfn;
	unsigned long node_end_pfn;
	unsigned long node_present_pages;
} init_node_data[MAX_NUMNODES] __initdata;

EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_memory_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(nr_cpus_in_node);
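
/*
 * Record that logical cpu 'cpu' lives on 'node': update the per-cpu lookup
 * table and, if the cpu is not already in the node's cpumask, add it and
 * bump the node's cpu count.
 */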
static inline void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) {
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
		nr_cpus_in_node[node]++;
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
		nr_cpus_in_node[node]--;
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
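
/*
 * Find the device tree "cpu" node for a logical cpu by matching its
 * hardware id against the ibm,ppc-interrupt-server#s property or,
 * failing that, the "reg" property.
 */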
static struct device_node * __devinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = (unsigned int *)get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = (unsigned int *)get_property(cpu_node,
							   "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}

/* must hold reference to node during call */
static int *of_get_associativity(struct device_node *dev)
{
	return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}
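
/*
 * Map a device tree node to a NUMA domain by reading its ibm,associativity
 * property at the depth selected by min_common_depth.  Falls back to
 * domain 0 when no associativity information is available.
 */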
static int of_node_numa_domain(struct device_node *device)
{
	int numa_domain;
	unsigned int *tmp;

	if (min_common_depth == -1)
		return 0;

	tmp = of_get_associativity(device);
	if (tmp && (tmp[0] >= min_common_depth)) {
		numa_domain = tmp[min_common_depth];
	} else {
		dbg("WARNING: no NUMA information for %s\n",
		    device->full_name);
		numa_domain = 0;
	}

	return numa_domain;
}

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine.  This resource then has different associativity
 * characteristics relative to its multiple connections.  We ignore
 * this for now.  We also assume that all cpu and memory sets have
 * their distances represented at a common level.  This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points property should
 * give the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * This property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes.  The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = (unsigned int *)get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	if ((len >= 1) && ref_points) {
		depth = ref_points[1];
	} else {
		dbg("WARNING: could not find NUMA "
		    "associativity reference point\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}
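
/*
 * The "reg" property of a memory node is a list of (address, size) pairs;
 * these helpers read #address-cells and #size-cells so we know how many
 * 32-bit cells each field occupies.
 */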
static int __init get_mem_addr_cells(void)
{
	struct device_node *memory = NULL;
	int rc;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		return 0; /* it won't matter */

	rc = prom_n_addr_cells(memory);
	return rc;
}

static int __init get_mem_size_cells(void)
{
	struct device_node *memory = NULL;
	int rc;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		return 0; /* it won't matter */

	rc = prom_n_size_cells(memory);
	return rc;
}
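
/*
 * Read an n-cell big-endian value from the buffer and advance the buffer
 * pointer past the cells just consumed.
 */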
static unsigned long read_n_cells(int n, unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}

	return result;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int numa_domain = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu)
		goto out;

	numa_domain = of_node_numa_domain(cpu);

	if (numa_domain >= num_online_nodes()) {
		/*
		 * POWER4 LPAR uses 0xffff as invalid node,
		 * don't warn in this case.
		 */
		if (numa_domain != 0xffff)
			printk(KERN_ERR "WARNING: cpu %ld "
			       "maps to invalid NUMA node %d\n",
			       lcpu, numa_domain);
		numa_domain = 0;
	}
out:
	node_set_online(numa_domain);

	map_cpu_to_node(lcpu, numa_domain);

	of_node_put(cpu);

	return numa_domain;
}

static int cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
		if (min_common_depth == -1 || !numa_enabled)
			map_cpu_to_node(lcpu, 0);
		else
			numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}

	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */
	extern unsigned long memory_limit;

	if (!memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}
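
/*
 * Walk the device tree, derive the NUMA topology from the cpu and memory
 * nodes, and fill in the node lookup tables and init_node_data[].
 * Returns non-zero if NUMA is disabled or no usable topology was found.
 */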
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int addr_cells, size_cells;
	int max_domain = 0;
	long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	numa_memory_lookup_table =
		(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
	memset(numa_memory_lookup_table, 0, entries * sizeof(char));

	for (i = 0; i < entries; i++)
		numa_memory_lookup_table[i] = ARRAY_INITIALISER;

	min_common_depth = find_min_common_depth();

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
	if (min_common_depth < 0)
		return min_common_depth;

	max_domain = numa_setup_cpu(boot_cpuid);

	/*
	 * Even though we connect cpus to numa domains later in SMP init,
	 * we need to know the maximum node id now. This is because each
	 * node id must have NODE_DATA etc backing it.
	 * As a result of hotplug we could still have cpus appear later on
	 * with larger node ids. In that case we force the cpu into node 0.
	 */
	for_each_cpu(i) {
		int numa_domain;

		cpu = find_cpu_node(i);

		if (cpu) {
			numa_domain = of_node_numa_domain(cpu);
			of_node_put(cpu);

			if (numa_domain < MAX_NUMNODES &&
			    max_domain < numa_domain)
				max_domain = numa_domain;
		}
	}

	addr_cells = get_mem_addr_cells();
	size_cells = get_mem_size_cells();
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int numa_domain;
		int ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		ranges = memory->n_addrs;
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(addr_cells, &memcell_buf);
		size = read_n_cells(size_cells, &memcell_buf);

		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
		size = _ALIGN_UP(size, MEMORY_INCREMENT);

		numa_domain = of_node_numa_domain(memory);

		if (numa_domain >= MAX_NUMNODES) {
			if (numa_domain != 0xffff)
				printk(KERN_ERR "WARNING: memory at %lx maps "
				       "to invalid NUMA node %d\n", start,
				       numa_domain);
			numa_domain = 0;
		}

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		if (! (size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		/*
		 * Initialize new node struct, or add to an existing one.
		 */
		if (init_node_data[numa_domain].node_end_pfn) {
			if ((start / PAGE_SIZE) <
			    init_node_data[numa_domain].node_start_pfn)
				init_node_data[numa_domain].node_start_pfn =
					start / PAGE_SIZE;
			if (((start / PAGE_SIZE) + (size / PAGE_SIZE)) >
			    init_node_data[numa_domain].node_end_pfn)
				init_node_data[numa_domain].node_end_pfn =
					(start / PAGE_SIZE) +
					(size / PAGE_SIZE);

			init_node_data[numa_domain].node_present_pages +=
				size / PAGE_SIZE;
		} else {
			node_set_online(numa_domain);

			init_node_data[numa_domain].node_start_pfn =
				start / PAGE_SIZE;
			init_node_data[numa_domain].node_end_pfn =
				init_node_data[numa_domain].node_start_pfn +
				size / PAGE_SIZE;
			init_node_data[numa_domain].node_present_pages =
				size / PAGE_SIZE;
		}

		for (i = start; i < (start + size); i += MEMORY_INCREMENT)
			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
				numa_domain;

		memory_present(numa_domain, start >> PAGE_SHIFT,
			       (start + size) >> PAGE_SHIFT);

		if (--ranges)
			goto new_range;
	}

	for (i = 0; i <= max_domain; i++)
		node_set_online(i);

	return 0;
}
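
/*
 * Fallback for machines with no usable NUMA information: treat all of
 * memory as a single node 0.
 */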
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long i;

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	if (!numa_memory_lookup_table) {
		long entries = top_of_ram >> MEMORY_INCREMENT_SHIFT;
		numa_memory_lookup_table =
			(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
		memset(numa_memory_lookup_table, 0, entries * sizeof(char));
		for (i = 0; i < entries; i++)
			numa_memory_lookup_table[i] = ARRAY_INITIALISER;
	}

	map_cpu_to_node(boot_cpuid, 0);

	node_set_online(0);

	init_node_data[0].node_start_pfn = 0;
	init_node_data[0].node_end_pfn = lmb_end_of_DRAM() / PAGE_SIZE;
	init_node_data[0].node_present_pages = total_ram / PAGE_SIZE;

	for (i = 0; i < top_of_ram; i += MEMORY_INCREMENT)
		numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;

	memory_present(0, 0, init_node_data[0].node_end_pfn);
}
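
/*
 * Print the memory ranges owned by each online node, as recorded in
 * numa_memory_lookup_table.
 */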
static void __init dump_numa_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_INFO "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM(); i += MEMORY_INCREMENT) {
			if (numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static unsigned long careful_allocation(int nid, unsigned long size,
					unsigned long align, unsigned long end)
{
	unsigned long ret = lmb_alloc_base(size, align, end);

	/* retry over all memory */
	if (!ret)
		ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	if (pa_to_nid(ret) < nid) {
		nid = pa_to_nid(ret);
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, nid);

		ret = virt_to_abs(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return ret;
}
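
/*
 * Set up bootmem on each online node: allocate the node's pglist_data and
 * bootmem bitmap (node-local when possible), free the node's usable memory
 * into bootmem, and re-reserve any lmb reserved regions that fall inside it.
 */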
void __init do_init_bootmem(void)
{
	int nid;
	int addr_cells, size_cells;
	struct device_node *memory = NULL;
	static struct notifier_block ppc64_numa_nb = {
		.notifier_call = cpu_numa_callback,
		.priority = 1 /* Must run before sched domains notifier. */
	};

	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_topology();

	register_cpu_notifier(&ppc64_numa_nb);

	for_each_online_node(nid) {
		unsigned long start_paddr, end_paddr;
		int i;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		start_paddr = init_node_data[nid].node_start_pfn * PAGE_SIZE;
		end_paddr = init_node_data[nid].node_end_pfn * PAGE_SIZE;

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = (struct pglist_data *)careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_paddr);
		NODE_DATA(nid) = abs_to_virt(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn =
			init_node_data[nid].node_start_pfn;
		NODE_DATA(nid)->node_spanned_pages =
			end_paddr - start_paddr;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_paddr);
		dbg("end_paddr = %lx\n", end_paddr);

		bootmap_pages = bootmem_bootmap_pages((end_paddr - start_paddr) >> PAGE_SHIFT);

		bootmem_paddr = careful_allocation(nid,
				bootmap_pages << PAGE_SHIFT,
				PAGE_SIZE, end_paddr);
		memset(abs_to_virt(bootmem_paddr), 0,
		       bootmap_pages << PAGE_SHIFT);
		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_paddr >> PAGE_SHIFT,
				  end_paddr >> PAGE_SHIFT);

		/*
		 * We need to do another scan of all memory sections to
		 * associate memory with the correct node.
		 */
		addr_cells = get_mem_addr_cells();
		size_cells = get_mem_size_cells();
		memory = NULL;
		while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
			unsigned long mem_start, mem_size;
			int numa_domain, ranges;
			unsigned int *memcell_buf;
			unsigned int len;

			memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
			if (!memcell_buf || len <= 0)
				continue;

			ranges = memory->n_addrs;	/* ranges in cell */
new_range:
			mem_start = read_n_cells(addr_cells, &memcell_buf);
			mem_size = read_n_cells(size_cells, &memcell_buf);

			numa_domain = of_node_numa_domain(memory);
			if (numa_domain >= MAX_NUMNODES)
				numa_domain = 0;

			if (numa_domain != nid)
				continue;

			mem_size = numa_enforce_memory_limit(mem_start, mem_size);
			if (mem_size) {
				dbg("free_bootmem %lx %lx\n", mem_start, mem_size);
				free_bootmem_node(NODE_DATA(nid), mem_start, mem_size);
			}

			if (--ranges)		/* process all ranges in cell */
				goto new_range;
		}

		/*
		 * Mark reserved regions on this node
		 */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].physbase;
			unsigned long size = lmb.reserved.region[i].size;

			if (pa_to_nid(physbase) != nid &&
			    pa_to_nid(physbase + size - 1) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase + size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}
	}
}
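
/*
 * Build the zone size and hole arrays for each online node and hand them
 * to free_area_init_node().  Everything goes into ZONE_DMA on ppc64.
 */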
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	int nid;

	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	for_each_online_node(nid) {
		unsigned long start_pfn;
		unsigned long end_pfn;

		start_pfn = init_node_data[nid].node_start_pfn;
		end_pfn = init_node_data[nid].node_end_pfn;

		zones_size[ZONE_DMA] = end_pfn - start_pfn;
		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
			init_node_data[nid].node_present_pages;

		dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
		    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);

		free_area_init_node(nid, NODE_DATA(nid), zones_size,
				    start_pfn, zholes_size);
	}
}
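
/*
 * Early command line parsing: "numa=off" disables NUMA, "numa=debug"
 * enables the dbg() messages in this file.
 */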
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);