// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>

static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};

static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
	ssize_t n;
	cpumask_var_t mask;
	struct node *node_dev = to_node(dev);

	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_to_pagebuf(list, buf, mask);
	free_cpumask_var(mask);

	return n;
}

static inline ssize_t node_read_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, false, buf);
}

static inline ssize_t node_read_cpulist(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, true, buf);
}

static DEVICE_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
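
/*
 * Usage sketch (illustrative, not part of the original file): both
 * attributes expose this node's online CPUs and differ only in format.
 * Assuming a hypothetical node0 holding CPUs 0-3:
 *
 *	$ cat /sys/devices/system/node/node0/cpumap
 *	0000000f
 *	$ cat /sys/devices/system/node/node0/cpulist
 *	0-3
 */
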
/**
 * struct node_access_nodes - Access class device to hold user visible
 * 			      relationships to other nodes.
 * @dev:	Device for this memory access class
 * @list_node:	List element in the node's access list
 * @access:	The access class rank
 */
struct node_access_nodes {
	struct device		dev;
	struct list_head	list_node;
	unsigned		access;
#ifdef CONFIG_HMEM_REPORTING
	struct node_hmem_attrs	hmem_attrs;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name	= "initiators",
	.attrs	= node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name	= "targets",
	.attrs	= node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};

static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}

static struct node_access_nodes *node_init_node_access(struct node *node,
						       unsigned access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}

#ifdef CONFIG_HMEM_REPORTING
#define ACCESS_ATTR(name) 						   \
static ssize_t name##_show(struct device *dev,				   \
			   struct device_attribute *attr,		   \
			   char *buf)					   \
{									   \
	return sprintf(buf, "%u\n", to_access_nodes(dev)->hmem_attrs.name); \
}									   \
static DEVICE_ATTR_RO(name);

ACCESS_ATTR(read_bandwidth)
ACCESS_ATTR(read_latency)
ACCESS_ATTR(write_bandwidth)
ACCESS_ATTR(write_latency)
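
/*
 * For reference, a hand-expanded sketch of what one ACCESS_ATTR()
 * instantiation above produces (illustration only, not part of the file):
 *
 *	static ssize_t read_bandwidth_show(struct device *dev,
 *					   struct device_attribute *attr,
 *					   char *buf)
 *	{
 *		return sprintf(buf, "%u\n",
 *			       to_access_nodes(dev)->hmem_attrs.read_bandwidth);
 *	}
 *	static DEVICE_ATTR_RO(read_bandwidth);
 */
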
static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @hmem_attrs: Heterogeneous memory performance attributes
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
			 unsigned access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->hmem_attrs = *hmem_attrs;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}
}
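
/*
 * Usage sketch (hypothetical caller such as an ACPI HMAT parser; the
 * numbers below are made up for illustration): report first-class
 * ("access0") performance for node 1.
 *
 *	struct node_hmem_attrs attrs = {
 *		.read_bandwidth  = 7600,
 *		.write_bandwidth = 7600,
 *		.read_latency    = 120,
 *		.write_latency   = 150,
 *	};
 *	node_set_perf_attrs(1, &attrs, 0);
 *
 * The values then appear under
 * /sys/devices/system/node/node1/access0/initiators/.
 */
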
/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev:	Device representing the cache level
 * @node:	List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)

#define CACHE_ATTR(name, fmt) 						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sprintf(buf, fmt "\n", to_cache_info(dev)->cache_attrs.name);\
}									\
DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);

	kfree(info);
}

static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto free_dev;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
free_name:
	kfree_const(dev->kobj.name);
free_dev:
	kfree(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				"attempt to add duplicate cache level:%d\n",
				cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dev = &info->dev;
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto free_cache;

	info->cache_attrs = *cache_attrs;
	if (device_register(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			 cache_attrs->level);
		goto free_name;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
free_name:
	kfree_const(dev->kobj.name);
free_cache:
	kfree(info);
}
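
/*
 * Usage sketch (hypothetical caller; the attribute values are purely
 * illustrative): describe a level 1 memory-side cache on node 0.
 *
 *	struct node_cache_attrs cache = {
 *		.level        = 1,
 *		.size         = SZ_16G,
 *		.line_size    = 64,
 *		.indexing     = NODE_CACHE_DIRECT_MAP,
 *		.write_policy = NODE_CACHE_WRITE_BACK,
 *	};
 *	node_add_cache(0, &cache);
 *
 * This creates /sys/devices/system/node/node0/memory_side_cache/index1/
 * exposing size, line_size, indexing and write_policy.
 */
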
static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif

#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int n;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state(pgdat, NR_SLAB_RECLAIMABLE);
	sunreclaimable = node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE);
	n = sprintf(buf,
		       "Node %d MemTotal:       %8lu kB\n"
		       "Node %d MemFree:        %8lu kB\n"
		       "Node %d MemUsed:        %8lu kB\n"
		       "Node %d Active:         %8lu kB\n"
		       "Node %d Inactive:       %8lu kB\n"
		       "Node %d Active(anon):   %8lu kB\n"
		       "Node %d Inactive(anon): %8lu kB\n"
		       "Node %d Active(file):   %8lu kB\n"
		       "Node %d Inactive(file): %8lu kB\n"
		       "Node %d Unevictable:    %8lu kB\n"
		       "Node %d Mlocked:        %8lu kB\n",
		       nid, K(i.totalram),
		       nid, K(i.freeram),
		       nid, K(i.totalram - i.freeram),
		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
				node_page_state(pgdat, NR_ACTIVE_FILE)),
		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
				node_page_state(pgdat, NR_INACTIVE_FILE)),
		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
		       nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
		       nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
		       nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
		       nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	n += sprintf(buf + n,
		       "Node %d HighTotal:      %8lu kB\n"
		       "Node %d HighFree:       %8lu kB\n"
		       "Node %d LowTotal:       %8lu kB\n"
		       "Node %d LowFree:        %8lu kB\n",
		       nid, K(i.totalhigh),
		       nid, K(i.freehigh),
		       nid, K(i.totalram - i.totalhigh),
		       nid, K(i.freeram - i.freehigh));
#endif
	n += sprintf(buf + n,
		       "Node %d Dirty:          %8lu kB\n"
		       "Node %d Writeback:      %8lu kB\n"
		       "Node %d FilePages:      %8lu kB\n"
		       "Node %d Mapped:         %8lu kB\n"
		       "Node %d AnonPages:      %8lu kB\n"
		       "Node %d Shmem:          %8lu kB\n"
		       "Node %d KernelStack:    %8lu kB\n"
		       "Node %d PageTables:     %8lu kB\n"
		       "Node %d NFS_Unstable:   %8lu kB\n"
		       "Node %d Bounce:         %8lu kB\n"
		       "Node %d WritebackTmp:   %8lu kB\n"
		       "Node %d KReclaimable:   %8lu kB\n"
		       "Node %d Slab:           %8lu kB\n"
		       "Node %d SReclaimable:   %8lu kB\n"
		       "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       "Node %d AnonHugePages:  %8lu kB\n"
		       "Node %d ShmemHugePages: %8lu kB\n"
		       "Node %d ShmemPmdMapped: %8lu kB\n"
#endif
			,
		       nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
		       nid, K(node_page_state(pgdat, NR_WRITEBACK)),
		       nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
		       nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
		       nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
		       nid, K(i.sharedram),
		       nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
		       nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
		       nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
		       nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
		       nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
		       nid, K(sreclaimable +
			      node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
		       nid, K(sreclaimable + sunreclaimable),
		       nid, K(sreclaimable),
		       nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       ,
		       nid, K(node_page_state(pgdat, NR_ANON_THPS) *
				       HPAGE_PMD_NR),
		       nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
				       HPAGE_PMD_NR),
		       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
				       HPAGE_PMD_NR)
#endif
		       );
	n += hugetlb_report_node_meminfo(nid, buf + n);
	return n;
}

#undef K
static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
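
/*
 * Reading this attribute yields a per-node slice of /proc/meminfo; an
 * abbreviated, purely illustrative sample:
 *
 *	$ head -3 /sys/devices/system/node/node0/meminfo
 *	Node 0 MemTotal:       32768000 kB
 *	Node 0 MemFree:        12345678 kB
 *	Node 0 MemUsed:        20422322 kB
 */
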
static ssize_t node_read_numastat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf,
		       "numa_hit %lu\n"
		       "numa_miss %lu\n"
		       "numa_foreign %lu\n"
		       "interleave_hit %lu\n"
		       "local_node %lu\n"
		       "other_node %lu\n",
		       sum_zone_numa_state(dev->id, NUMA_HIT),
		       sum_zone_numa_state(dev->id, NUMA_MISS),
		       sum_zone_numa_state(dev->id, NUMA_FOREIGN),
		       sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
		       sum_zone_numa_state(dev->id, NUMA_LOCAL),
		       sum_zone_numa_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);

static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int n = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
			     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n",
			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
			     sum_zone_numa_state(nid, i));
#endif

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n",
			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
			     NR_VM_NUMA_STAT_ITEMS],
			     node_page_state(pgdat, i));

	return n;
}
static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);

static ssize_t node_read_distance(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i)
		len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));

	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
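
/*
 * Illustrative output: one distance value per online node, relative to
 * this node. On a hypothetical two-node machine, node0's distance file
 * might read "10 21" (local access cost first, remote cost second).
 */
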
static struct attribute *node_dev_attrs[] = {
	&dev_attr_cpumap.attr,
	&dev_attr_cpulist.attr,
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};
ATTRIBUTE_GROUPS(node_dev);

#ifdef CONFIG_HUGETLBFS
/*
 * hugetlbfs per node attributes registration interface:
 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 * it will register its per node attributes for all online nodes with
 * memory. It will also call register_hugetlbfs_with_node(), below, to
 * register its attribute registration functions with this node driver.
 * Once these hooks have been initialized, the node driver will call into
 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 */
static node_registration_func_t __hugetlb_register_node;
static node_registration_func_t __hugetlb_unregister_node;

static inline bool hugetlb_register_node(struct node *node)
{
	if (__hugetlb_register_node &&
			node_state(node->dev.id, N_MEMORY)) {
		__hugetlb_register_node(node);
		return true;
	}
	return false;
}

static inline void hugetlb_unregister_node(struct node *node)
{
	if (__hugetlb_unregister_node)
		__hugetlb_unregister_node(node);
}

void register_hugetlbfs_with_node(node_registration_func_t doregister,
				  node_registration_func_t unregister)
{
	__hugetlb_register_node   = doregister;
	__hugetlb_unregister_node = unregister;
}
#else
static inline void hugetlb_register_node(struct node *node) {}

static inline void hugetlb_unregister_node(struct node *node) {}
#endif

static void node_device_release(struct device *dev)
{
	struct node *node = to_node(dev);

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
	/*
	 * We schedule the work only when a memory section is
	 * onlined/offlined on this node. When we come here,
	 * all the memory on this node has been offlined,
	 * so we won't enqueue new work to this work.
	 *
	 * The work is using node->node_work, so we should
	 * flush work before freeing the memory.
	 */
	flush_work(&node->node_work);
#endif
	kfree(node);
}

/*
 * register_node - Setup a sysfs device for a node.
 * @num - Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;
	error = device_register(&node->dev);

	if (error)
		put_device(&node->dev);
	else {
		hugetlb_register_node(node);

		compaction_register_node(node);
	}
	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	hugetlb_unregister_node(node);		/* no-op, if memoryless node */
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
}

struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}
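
/*
 * The two links created above are symmetric: nodeN gains a "cpuM" symlink
 * pointing at the cpu device, and cpuM gains a "nodeN" symlink back, e.g.
 * /sys/devices/system/node/node0/cpu1 and /sys/devices/system/cpu/cpu1/node0.
 */
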
/**
 * register_memory_node_under_compute_node - link memory node to its compute
 *					     node for a given access class.
 * @mem_nid:	Memory node number
 * @cpu_nid:	Cpu  node number
 * @access:	Access class to register
 *
 * Description:
 *	For use with platforms that may have separate memory and compute nodes.
 *	This function will export node relationships linking which memory
 *	initiator nodes can access memory targets at a given ranked access
 *	class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    unsigned access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
 err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}
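
/*
 * Illustrative result, assuming CPU node 0 can reach memory node 1 at
 * access class 0:
 *
 *	/sys/devices/system/node/node0/access0/targets/node1
 *	/sys/devices/system/node/node1/access0/initiators/node0
 *
 * (Only the group and link names are guaranteed by the code above; the
 * link targets resolve to the respective node devices.)
 */
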
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __ref get_nid_for_pfn(unsigned long pfn)
{
	if (!pfn_valid_within(pfn))
		return -1;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state < SYSTEM_RUNNING)
		return early_pfn_to_nid(pfn);
#endif
	return pfn_to_nid(pfn);
}

/* register memory section under specified node if it spans that node */
int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
{
	int ret, nid = *(int *)arg;
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
	sect_end_pfn += PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int page_nid;

		/*
		 * memory block could have several absent sections from start.
		 * skip pfn range from absent section
		 */
		if (!pfn_present(pfn)) {
			pfn = round_down(pfn + PAGES_PER_SECTION,
					 PAGES_PER_SECTION) - 1;
			continue;
		}

		/*
		 * We need to check if page belongs to nid only for the boot
		 * case, during hotplug we know that all pages in the memory
		 * block belong to the same node.
		 */
		if (system_state == SYSTEM_BOOTING) {
			page_nid = get_nid_for_pfn(pfn);
			if (page_nid < 0)
				continue;
			if (page_nid != nid)
				continue;
		}
		ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
					&mem_blk->dev.kobj,
					kobject_name(&mem_blk->dev.kobj));
		if (ret)
			return ret;

		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				&node_devices[nid]->dev.kobj,
				kobject_name(&node_devices[nid]->dev.kobj));
	}
	/* mem section does not span the specified node */
	return 0;
}

/* unregister memory section under all nodes that it spans */
int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
				    unsigned long phys_index)
{
	NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	if (!mem_blk) {
		NODEMASK_FREE(unlinked_nodes);
		return -EFAULT;
	}
	if (!unlinked_nodes)
		return -ENOMEM;
	nodes_clear(*unlinked_nodes);

	sect_start_pfn = section_nr_to_pfn(phys_index);
	sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int nid;

		nid = get_nid_for_pfn(pfn);
		if (nid < 0)
			continue;
		if (!node_online(nid))
			continue;
		if (node_test_and_set(nid, *unlinked_nodes))
			continue;
		sysfs_remove_link(&node_devices[nid]->dev.kobj,
			 kobject_name(&mem_blk->dev.kobj));
		sysfs_remove_link(&mem_blk->dev.kobj,
			 kobject_name(&node_devices[nid]->dev.kobj));
	}
	NODEMASK_FREE(unlinked_nodes);
	return 0;
}

int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
{
	return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
					register_mem_sect_under_node);
}
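
/*
 * Called from the memory hotplug path (e.g. add_memory_resource()) so that
 * every memory block in [start_pfn, end_pfn) gets linked under its node
 * via register_mem_sect_under_node() above.
 */
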
#ifdef CONFIG_HUGETLBFS
/*
 * Handle per node hstate attribute [un]registration on transitions
 * to/from memoryless state.
 */
static void node_hugetlb_work(struct work_struct *work)
{
	struct node *node = container_of(work, struct node, node_work);

	/*
	 * We only get here when a node transitions to/from memoryless state.
	 * We can detect which transition occurred by examining whether the
	 * node has memory now. hugetlb_register_node() already checks this
	 * so we try to register the attributes. If that fails, then the
	 * node has transitioned to memoryless, try to unregister the
	 * attributes.
	 */
	if (!hugetlb_register_node(node))
		hugetlb_unregister_node(node);
}

static void init_node_hugetlb_work(int nid)
{
	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
}

static int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int nid = mnb->status_change_nid;

	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/*
		 * offload per node hstate [un]registration to a work thread
		 * when transitioning to/from memoryless state.
		 */
		if (nid != NUMA_NO_NODE)
			schedule_work(&node_devices[nid]->node_work);
		break;

	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		break;
	}

	return NOTIFY_OK;
}
#endif	/* CONFIG_HUGETLBFS */
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
    !defined(CONFIG_HUGETLBFS)
static inline int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	return NOTIFY_OK;
}

static void init_node_hugetlb_work(int nid) { }

#endif

int __register_one_node(int nid)
{
	int error;
	int cpu;

	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!node_devices[nid])
		return -ENOMEM;

	error = register_node(node_devices[nid], nid);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	INIT_LIST_HEAD(&node_devices[nid]->access_list);
	/* initialize work queue for memory hot plug */
	init_node_hugetlb_work(nid);
	node_init_caches(nid);

	return error;
}
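
/*
 * Callers normally reach this through the register_one_node() wrapper in
 * <linux/node.h>, which checks that the node is online and also links the
 * node's memory sections, e.g. from boot-time topology setup or when a
 * node first comes online via hotplug.
 */
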
void unregister_one_node(int nid)
{
	if (!node_devices[nid])
		return;

	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */
static ssize_t print_nodes_state(enum node_states state, char *buf)
{
	int n;

	n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
		      nodemask_pr_args(&node_states[state]));
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);
	return print_nodes_state(na->state, buf);
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
};

static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
static int __init register_node_type(void)
{
	int ret;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (!ret) {
		static struct notifier_block node_memory_callback_nb = {
			.notifier_call = node_memory_callback,
			.priority = NODE_CALLBACK_PRI,
		};
		register_hotmemory_notifier(&node_memory_callback_nb);
	}

	/*
	 * Note: we're not going to unregister the node class if we fail
	 * to register the node state class attribute files.
	 */
	return ret;
}
postcore_initcall(register_node_type);