// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/stat.h>
#include <linux/slab.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>
static DEFINE_MUTEX(mem_sysfs_mutex);

#define MEMORY_CLASS_NAME	"memory"

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;
static inline unsigned long base_memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return base_memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}
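/*
 * Worked example (illustrative, assuming the x86-64 defaults where both
 * the section size and the memory block size are 128 MiB, i.e.
 * sections_per_block == 1): physical address 0x10000000 yields
 * PFN_DOWN(0x10000000) == 0x10000 with 4 KiB pages, which lies in
 * section 2, so phys_to_block_id(0x10000000) == 2.
 */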
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};
static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);
static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}
unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);
/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}
/*
 * Show whether the memory block is likely to be offlineable (or is already
 * offline). Once offline, the memory block could be removed. The return
 * value does not, however, indicate that there is a way to remove the
 * memory block.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long i, pfn;
	int ret = 1;

	if (mem->state != MEM_ONLINE)
		goto out;

	for (i = 0; i < sections_per_block; i++) {
		if (!present_section_nr(mem->start_section_nr + i))
			continue;
		pfn = section_nr_to_pfn(mem->start_section_nr + i);
		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
	}

out:
	return sprintf(buf, "%d\n", ret);
}
/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded.
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
			      mem->state);
		WARN_ON(1);
		break;
	}

	return len;
}
int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}
/*
 * The probe routines leave the pages uninitialized, just as the bootmem code
 * does. Make sure we do not access them, but instead use only information from
 * within sections.
 */
static bool pages_correctly_probed(unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	unsigned long section_nr_end = section_nr + sections_per_block;
	unsigned long pfn = start_pfn;

	/*
	 * memmap between sections is not contiguous except with
	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
	 * and assume memmap is contiguous within each section.
	 */
	for (; section_nr < section_nr_end; section_nr++) {
		if (WARN_ON_ONCE(!pfn_valid(pfn)))
			return false;

		if (!present_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) not present\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (!valid_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (online_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) is already online\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		}
		pfn += PAGES_PER_SECTION;
	}

	return true;
}
/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(unsigned long start_section_nr, unsigned long action,
		    int online_type)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	int ret;

	start_pfn = section_nr_to_pfn(start_section_nr);

	switch (action) {
	case MEM_ONLINE:
		if (!pages_correctly_probed(start_pfn))
			return -EBUSY;

		ret = online_pages(start_pfn, nr_pages, online_type);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: %ld\n",
		     __func__, start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}
static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state,
				  mem->online_type);

	mem->state = ret ? from_state_req : to_state;

	return ret;
}
/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * If we are called from state_store(), online_type will be
	 * set >= 0. Otherwise we were called from the device online
	 * attribute and need to set the online_type.
	 */
	if (mem->online_type < 0)
		mem->online_type = MMOP_ONLINE_KEEP;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);

	/* clear online_type */
	mem->online_type = -1;

	return ret;
}
static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	/* Can't offline block with non-present sections */
	if (mem->section_count != sections_per_block)
		return -EINVAL;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
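/*
 * Overview (descriptive, derived from the code in this file): writing
 * "online" to a block's state attribute travels through state_store() ->
 * device_online() -> memory_subsys_online() ->
 * memory_block_change_state() -> memory_block_action() -> online_pages().
 * The "offline" path is symmetric via device_offline() and
 * offline_pages().
 */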
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret, online_type;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (sysfs_streq(buf, "online_kernel"))
		online_type = MMOP_ONLINE_KERNEL;
	else if (sysfs_streq(buf, "online_movable"))
		online_type = MMOP_ONLINE_MOVABLE;
	else if (sysfs_streq(buf, "online"))
		online_type = MMOP_ONLINE_KEEP;
	else if (sysfs_streq(buf, "offline"))
		online_type = MMOP_OFFLINE;
	else {
		ret = -EINVAL;
		goto err;
	}

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE_KEEP:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

err:
	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
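/*
 * Typical userspace usage (illustrative; "memory32" is an example block):
 *
 *	# cat /sys/devices/system/memory/memory32/state
 *	offline
 *	# echo online > /sys/devices/system/memory/memory32/state
 *
 * "online_kernel" requests ZONE_NORMAL and "online_movable" requests
 * ZONE_MOVABLE explicitly, matching the strings parsed above.
 */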
/*
 * phys_device is a bad name for this. What I really want is a way to
 * differentiate between memory ranges that are part of physical devices
 * that constitute a complete removable unit or FRU (field-replaceable
 * unit), i.e. do these ranges belong to the same physical device, such
 * that if I offline all of these sections I can then remove the physical
 * device?
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	return sprintf(buf, "%d\n", mem->phys_device);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
			       unsigned long nr_pages, int online_type,
			       struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone != default_zone) {
		strcat(buf, " ");
		strcat(buf, zone->name);
	}
}
static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long valid_start_pfn, valid_end_pfn;
	struct zone *default_zone;
	int nid;

	/*
	 * Check the existing zone. Make sure that we do that only on
	 * online nodes, otherwise page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * A block that contains more than one zone cannot be
		 * offlined. This can happen e.g. for ZONE_DMA and ZONE_DMA32.
		 */
		if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
					  &valid_start_pfn, &valid_end_pfn))
			return sprintf(buf, "none\n");
		start_pfn = valid_start_pfn;
		strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
		goto out;
	}

	nid = mem->nid;
	default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
	strcat(buf, default_zone->name);

	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
			   default_zone);
	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
			   default_zone);
out:
	strcat(buf, "\n");

	return strlen(buf);
}
static DEVICE_ATTR_RO(valid_zones);
#endif
static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);
/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);
/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	if (memhp_auto_online)
		return sprintf(buf, "online\n");
	else
		return sprintf(buf, "offline\n");
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	if (sysfs_streq(buf, "online"))
		memhp_auto_online = true;
	else if (sysfs_streq(buf, "offline"))
		memhp_auto_online = false;
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);
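/*
 * Descriptive note: this policy only affects memory hot-added later, e.g.
 *
 *	# echo online > /sys/devices/system/memory/auto_online_blocks
 *
 * makes newly added memory blocks come up online automatically instead of
 * staying offline until userspace onlines them.
 */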
/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block);
	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif
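/*
 * Illustrative usage (the address is an example; it must be aligned to
 * the memory block size, as checked above):
 *
 *	# echo 0x40000000 > /sys/devices/system/memory/probe
 *
 * hot-adds one memory block starting at physical address 1 GiB.
 */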
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	if (!pfn_to_online_page(pfn))
		return -EIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
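/*
 * Illustrative usage (the address is an example): both attributes take a
 * physical address; the containing page is offlined. Soft offlining
 * migrates the page contents away first, while hard_offline_page acts as
 * if a memory error had been reported at that address:
 *
 *	# echo 0x2000000 > /sys/devices/system/memory/soft_offline_page
 */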
/*
 * Note that phys_device is optional. It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}
/* A reference for the returned memory block device is acquired. */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct device *dev;

	dev = subsys_find_device_by_id(&memory_subsys, block_id, NULL);
	return dev ? to_memory_block(dev) : NULL;
}
/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index. If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all device subsystems.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	unsigned long block_id = base_memory_block_id(__section_nr(section));

	return find_memory_block_by_id(block_id);
}
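/*
 * Usage note (descriptive): both lookup helpers return with a device
 * reference held (see the comment above find_memory_block_by_id()), so
 * callers are expected to pair them with put_device():
 *
 *	struct memory_block *mem = find_memory_block(section);
 *
 *	if (mem) {
 *		... inspect mem->state, mem->nid, etc. ...
 *		put_device(&mem->dev);
 *	}
 */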
static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};
/*
 * register_memory - Setup a sysfs device for a memory block.
 */
static int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret)
		put_device(&memory->dev);

	return ret;
}
static int init_memory_block(struct memory_block **memory,
			     unsigned long block_id, unsigned long state)
{
	struct memory_block *mem;
	unsigned long start_pfn;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);
	mem->nid = NUMA_NO_NODE;

	ret = register_memory(mem);

	*memory = mem;
	return ret;
}
static int add_memory_block(unsigned long base_section_nr)
{
	int ret, section_count = 0;
	struct memory_block *mem;
	unsigned long nr;

	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
	     nr++)
		if (present_section_nr(nr))
			section_count++;

	if (section_count == 0)
		return 0;
	ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
				MEM_ONLINE);
	if (ret)
		return ret;
	mem->section_count = section_count;
	return 0;
}
static void unregister_memory(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}
/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 */
int create_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	mutex_lock(&mem_sysfs_mutex);
	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
		if (ret)
			break;
		mem->section_count = sections_per_block;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			mem->section_count = 0;
			unregister_memory(mem);
		}
	}
	mutex_unlock(&mem_sysfs_mutex);
	return ret;
}
/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	mutex_lock(&mem_sysfs_mutex);
	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		mem->section_count = 0;
		unregister_memory_block_under_nodes(mem);
		unregister_memory(mem);
	}
	mutex_unlock(&mem_sysfs_mutex);
}
/* Return true if the memory block is offlined, otherwise return false. */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}
static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};
/*
 * Initialize the sysfs support for memory devices...
 */
void __init memory_dev_init(void)
{
	int ret;
	int err;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		goto out;

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized.
	 */
	mutex_lock(&mem_sysfs_mutex);
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		err = add_memory_block(nr);
		if (!ret)
			ret = err;
	}
	mutex_unlock(&mem_sysfs_mutex);

out:
	if (ret)
		panic("%s() failed: %d\n", __func__, ret);
}
/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}