/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
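/*
 * Worked example (hypothetical command line, not from this source): with
 * numa=fake=1G,4G the parsed boundaries are 1 GiB and 4 GiB.  Regions
 * ending below 1 GiB stay in node 0; the first region whose end crosses
 * 1 GiB creates fake node 1, and crossing 4 GiB creates fake node 2.
 * Boundaries must be ascending; a non-ascending token is effectively
 * ignored, since cmdline is only advanced when a boundary is crossed.
 */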
/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
					  struct node_active_region *node_ar)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (pfn >= start_pfn && pfn < end_pfn) {
			node_ar->nid = nid;
			node_ar->start_pfn = start_pfn;
			node_ar->end_pfn = end_pfn;
			break;
		}
	}
}
static void reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;
	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);
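/*
 * Worked example (illustrative table contents): with a reference-point
 * depth of 2 and LOCAL_DISTANCE of 10, two nodes whose first (most
 * significant) lookup entries already match report distance 10; a match
 * only at the second level gives 20; no match at any level gives 40.
 */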
static void initialize_distance_lookup_table(int nid,
		const __be32 *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		const __be32 *entry;

		entry = &associativity[be32_to_cpu(distance_ref_points[i])];
		distance_lookup_table[nid][i] = of_read_number(entry, 1);
	}
}
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (of_read_number(associativity, 1) >= min_common_depth)
		nid = of_read_number(&associativity[min_common_depth], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 &&
	    of_read_number(associativity, 1) >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}
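/*
 * Worked example (made-up property contents): ibm,associativity is a
 * counted list, e.g. the cells {5, 0, 0, 1, 3, 9} describe five domain
 * ids.  With min_common_depth == 4, the count (5) is >= 4, so the nid
 * is read from the cell at index 4, i.e. 3.
 */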
/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);
static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = of_read_number(distance_ref_points, 1);
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = of_read_number(&distance_ref_points[1], 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}
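/*
 * Example (hypothetical firmware value): a form 1 platform exposing
 * ibm,associativity-reference-points = <4 4> yields a depth of 4, so the
 * fourth associativity entry is what identifies a node.
 */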
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}
static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}
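/*
 * Worked example: with n == 2 and the big-endian cells {0x1, 0x80000000},
 * the result is (0x1 << 32) | 0x80000000 == 0x180000000 (6 GiB).  The
 * buffer pointer is left just past the cells consumed.
 */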
/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
	const __be32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = of_read_number(cp, 1);
	drmem->reserved = of_read_number(&cp[1], 1);
	drmem->aa_index = of_read_number(&cp[2], 1);
	drmem->flags = of_read_number(&cp[3], 1);

	*cellp = cp + 4;
}
/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries. Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
	const __be32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = of_read_number(prop++, 1);

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}
/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}
struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = of_read_number(&aa->arrays[index], 1);

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}
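/*
 * Worked example (made-up lookup-array contents): with array_sz == 4,
 * min_common_depth == 2 and aa_index == 2, the cell consulted is
 * arrays[2 * 4 + 2 - 1] == arrays[9], i.e. the second cell of the third
 * associativity array.
 */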
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid;
	struct device_node *cpu;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 */
	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		nid = 0;
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}
static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}
static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE, nid;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		nid = numa_setup_cpu(lcpu);
		verify_cpu_node_mapping((int)lcpu, nid);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
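/*
 * Worked example (illustrative addresses): with usable DRAM ending at
 * 0x100000000 (4 GiB), a region at 0xf0000000 of size 0x20000000 is
 * trimmed to 0x10000000, while a region starting at or above 4 GiB
 * returns 0 and should be dropped.
 */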
/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * read the counter from linux,drconf-usable-memory
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const __be32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
					((base + size) >> PAGE_SHIFT), &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz,
						  &memblock.memory, nid);
		} while (--ranges);
	}
}
static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}
static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}
/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}
static struct notifier_block ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};
static void __init mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = pgdat_end_pfn(node);

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}
void __init do_init_bootmem(void)
{
	int nid, cpu;

	min_low_pfn = 0;
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();
	register_cpu_notifier(&ppc64_numa_nb);
	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 */
	for_each_possible_cpu(cpu) {
		cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
				  (void *)(unsigned long)cpu);
	}
}
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
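/*
 * Example command lines handled above (values illustrative): "numa=off"
 * disables NUMA, "numa=debug" enables the dbg() messages, and
 * "numa=fake=1G,4G" hands the boundary list to
 * fake_numa_create_new_node().  Since plain strstr() is used, the
 * options may be combined, e.g. "numa=debug,fake=1G".
 */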
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const __be32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}
/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}
/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}
static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const __be32 *dm = NULL;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}
/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);
/*
 * Snapshot the current values of the associativity change counters
 * maintained by the hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}
/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
		}
	}

	return cpumask_weight(changes);
}
/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
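/*
 * That is, 6 * 8 / 4 + 1 == 13 cells: one length cell plus up to twelve
 * 32-bit associativity values unpacked from the hcall return registers.
 */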
/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
{
	int i, nr_assoc_doms = 0;
	const __be16 *field = (const __be16 *) packed;

#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)

	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
			/* All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
			 * Just store them.
			 */
			unpacked[i] = *((__be32 *)field);
			field += 2;
		} else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = cpu_to_be32(
				be16_to_cpup(field) & VPHN_FIELD_MASK);
			field++;
			nr_assoc_doms++;
		} else {
			/* Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field
			 */
			unpacked[i] = *((__be32 *)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = cpu_to_be32(nr_assoc_doms);

	return nr_assoc_doms;
}
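/*
 * Worked example (made-up field values): a 16-bit field of 0x8002 has
 * the MSB set, so it unpacks on its own to the 32-bit value 0x00000002;
 * a field of 0x0000 followed by 0x0005 has the MSB clear, so the two
 * fields together form the value 0x00000005; a field of 0xffff marks
 * this and all following fields as unused.
 */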
/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}
static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}
/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(update->cpu);
		map_cpu_to_node(update->cpu, update->new_nid);
		vdso_getcpu_init();
	}

	return 0;
}
static int update_lookup_table(void *data)
{
	struct topology_update_data *update;

	if (!data)
		return -EINVAL;

	/*
	 * Upon topology update, the numa-cpu lookup table needs to be updated
	 * for all threads in the core, including offline CPUs, to ensure that
	 * future hotplug operations respect the cpu-to-node associativity
	 * properly.
	 */
	for (update = data; update; update = update->next) {
		int nid, base, j;

		nid = update->new_nid;
		base = cpu_first_thread_sibling(update->cpu);

		for (j = 0; j < threads_per_core; j++) {
			update_numa_cpu_lookup_table(base + j, nid);
		}
	}

	return 0;
}
/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If siblings aren't flagged for changes, updates list
		 * will be too short. Skip on this update and set for next
		 * update.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		/* Use associativity from first thread for all siblings */
		vphn_get_associativity(cpu, associativity);
		new_nid = associativity_to_nid(associativity);
		if (new_nid < 0 || !node_online(new_nid))
			new_nid = first_online_node;

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
			if (i < weight)
				ud->next = &updates[i];
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	/*
	 * In cases where we have nothing to update (because the updates list
	 * is too short or because the new topology is same as the old one),
	 * skip invoking update_cpu_topology() via stop-machine(). This is
	 * necessary (and not just a fast-path optimization) since stop-machine
	 * can end up electing a random CPU to run update_cpu_topology(), and
	 * thus trick us into setting up incorrect cpu-node mappings (since
	 * 'updates' is kzalloc()'ed).
	 *
	 * And for the similar reason, we will skip all the following updating.
	 */
	if (!cpumask_weight(&updated_cpus))
		goto out;

	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	/*
	 * Update the numa-cpu lookup table with the new mappings, even for
	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
	 */
	stop_machine(update_lookup_table, &updates[0],
					cpumask_of(raw_smp_processor_id()));

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

out:
	kfree(updates);
	return changed;
}
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	mod_timer(&topology_timer, topology_timer.expires);
}
#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_prop_reconfig *update;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		update = (struct of_prop_reconfig *)data;
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;
			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};

#endif
/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
			vphn_enabled = 0;
#ifdef CONFIG_SMP
			rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
		}
	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   lppaca_shared_proc(get_lppaca())) {
		if (!vphn_enabled) {
			prrn_enabled = 0;
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			init_timer_deferrable(&topology_timer);
			reset_topology_timer();
		}
	}

	return rc;
}
/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
#ifdef CONFIG_SMP
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
	} else if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}
static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}

static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};
static int topology_update_init(void)
{
	start_topology_update();
	proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);

	return 0;
}
device_initcall(topology_update_init);
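/*
 * Usage sketch for the proc interface created above: reading
 * /proc/powerpc/topology_updates reports "on" or "off", and writing
 * either string toggles polling, e.g.
 *
 *	echo off > /proc/powerpc/topology_updates
 */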
#endif /* CONFIG_PPC_SPLPAR */