// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME	"memory"

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;
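
/*
 * A memory block device covers sections_per_block contiguous memory
 * sections; the value is computed in memory_dev_init() from the
 * architecture-provided memory_block_size_bytes(). The helpers below
 * map section numbers, pfns and physical addresses to memory block ids.
 */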
static inline unsigned long base_memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return base_memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};
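
/*
 * Blocking notifier chain used by memory_notify() to tell registered
 * listeners about memory hot(un)plug state transitions such as
 * MEM_GOING_OFFLINE and MEM_ONLINE.
 */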
static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);

static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}
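
/*
 * The default memory block size is the minimum section size, but an
 * architecture can override this __weak helper to report a larger,
 * platform-specific block size; memory_dev_init() validates that the
 * result is a power of two no smaller than MIN_MEMORY_BLOCK_SIZE.
 */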
unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}

/*
 * Show whether the memory block is likely to be offlineable (or is already
 * offline). Once offline, the memory block could be removed. The return
 * value does, however, not indicate that there is a way to remove the
 * memory block.
 */
static ssize_t removable_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long pfn;
	int ret = 1, i;

	if (mem->state != MEM_ONLINE)
		goto out;

	for (i = 0; i < sections_per_block; i++) {
		if (!present_section_nr(mem->start_section_nr + i))
			continue;
		pfn = section_nr_to_pfn(mem->start_section_nr + i);
		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
	}

out:
	return sprintf(buf, "%d\n", ret);
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
			      mem->state);
		WARN_ON(1);
		break;
	}

	return len;
}
int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}

/*
 * The probe routines leave the pages uninitialized, just as the bootmem code
 * does. Make sure we do not access them, but instead use only information from
 * within sections.
 */
static bool pages_correctly_probed(unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	unsigned long section_nr_end = section_nr + sections_per_block;
	unsigned long pfn = start_pfn;

	/*
	 * memmap between sections is not contiguous except with
	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
	 * and assume memmap is contiguous within each section
	 */
	for (; section_nr < section_nr_end; section_nr++) {
		if (WARN_ON_ONCE(!pfn_valid(pfn)))
			return false;

		if (!present_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) not present\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (!valid_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (online_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) is already online\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		}
		pfn += PAGES_PER_SECTION;
	}

	return true;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(unsigned long start_section_nr, unsigned long action,
		    int online_type)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	int ret;

	start_pfn = section_nr_to_pfn(start_section_nr);

	switch (action) {
	case MEM_ONLINE:
		if (!pages_correctly_probed(start_pfn))
			return -EBUSY;

		ret = online_pages(start_pfn, nr_pages, online_type);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state,
				  mem->online_type);

	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * If we are called from state_store(), online_type will be
	 * set >= 0. Otherwise we were called from the device online
	 * attribute and need to set the online_type.
	 */
	if (mem->online_type < 0)
		mem->online_type = MMOP_ONLINE_KEEP;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);

	/* clear online_type */
	mem->online_type = -1;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	/* Can't offline block with non-present sections */
	if (mem->section_count != sections_per_block)
		return -EINVAL;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret, online_type;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (sysfs_streq(buf, "online_kernel"))
		online_type = MMOP_ONLINE_KERNEL;
	else if (sysfs_streq(buf, "online_movable"))
		online_type = MMOP_ONLINE_MOVABLE;
	else if (sysfs_streq(buf, "online"))
		online_type = MMOP_ONLINE_KEEP;
	else if (sysfs_streq(buf, "offline"))
		online_type = MMOP_OFFLINE;
	else {
		ret = -EINVAL;
		goto err;
	}

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE_KEEP:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

err:
	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
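
/*
 * Example (from userspace): a block is onlined or offlined by writing one
 * of the strings parsed above to its sysfs state attribute, e.g.
 *   echo online > /sys/devices/system/memory/memory42/state
 * where "memory42" stands for any existing memory block directory.
 */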

/*
 * phys_device is a bad name for this. What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or fru.
 * i.e. do these ranges belong to the same physical device,
 * s.t. if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);

	return sprintf(buf, "%d\n", mem->phys_device);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
			       unsigned long nr_pages, int online_type,
			       struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone != default_zone) {
		strcat(buf, " ");
		strcat(buf, zone->name);
	}
}
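
/*
 * valid_zones: if the block is online, show the zone it belongs to (or
 * "none" if it spans multiple zones and thus cannot be offlined); if it
 * is offline, show the default zone followed by any alternative zones it
 * could be onlined to.
 */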
static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long valid_start_pfn, valid_end_pfn;
	struct zone *default_zone;
	int nid;

	/*
	 * Check the existing zone. Make sure that we do that only on the
	 * online nodes otherwise the page_zone is not reliable
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * A block that contains more than one zone can not be
		 * offlined. This can happen e.g. for ZONE_DMA and ZONE_DMA32.
		 */
		if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
					  &valid_start_pfn, &valid_end_pfn))
			return sprintf(buf, "none\n");
		start_pfn = valid_start_pfn;
		strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
		goto out;
	}

	nid = mem->nid;
	default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
	strcat(buf, default_zone->name);

	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
			   default_zone);
	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
			   default_zone);
out:
	strcat(buf, "\n");

	return strlen(buf);
}
static DEVICE_ATTR_RO(valid_zones);
#endif

static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);

/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	if (memhp_auto_online)
		return sprintf(buf, "online\n");
	else
		return sprintf(buf, "offline\n");
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	if (sysfs_streq(buf, "online"))
		memhp_auto_online = true;
	else if (sysfs_streq(buf, "offline"))
		memhp_auto_online = false;
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif
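
/*
 * Example (from userspace): hot-add memory at a block-aligned physical
 * address by writing the address to the probe attribute, e.g.
 *   echo 0x40000000 > /sys/devices/system/memory/probe
 * (0x40000000 is just an illustrative, suitably aligned address).
 */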

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif

/*
 * Note that phys_device is optional. It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/* A reference for the returned memory block device is acquired. */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct device *dev;

	dev = subsys_find_device_by_id(&memory_subsys, block_id, NULL);
	return dev ? to_memory_block(dev) : NULL;
}

/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index. If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all device subsystems.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	unsigned long block_id = base_memory_block_id(__section_nr(section));

	return find_memory_block_by_id(block_id);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

/*
 * register_memory - Setup a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret)
		put_device(&memory->dev);

	return ret;
}

static int init_memory_block(struct memory_block **memory,
			     unsigned long block_id, unsigned long state)
{
	struct memory_block *mem;
	unsigned long start_pfn;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);
	mem->nid = NUMA_NO_NODE;

	ret = register_memory(mem);

	*memory = mem;
	return ret;
}
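
/*
 * Called from memory_dev_init() at boot to create a block device for each
 * range of sections that contains at least one present section.
 */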
static int add_memory_block(unsigned long base_section_nr)
{
	int ret, section_count = 0;
	struct memory_block *mem;
	unsigned long nr;

	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
	     nr++)
		if (present_section_nr(nr))
			section_count++;

	if (section_count == 0)
		return 0;
	ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
				MEM_ONLINE);
	if (ret)
		return ret;
	mem->section_count = section_count;
	return 0;
}

static void unregister_memory(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
		if (ret)
			break;
		mem->section_count = sections_per_block;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
			mem->section_count = 0;
			unregister_memory(mem);
		}
	}
	return ret;
}

/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		mem->section_count = 0;
		unregister_memory_block_under_nodes(mem);
		unregister_memory(mem);
	}
}

/* Return true if the memory block is offlined, otherwise return false. */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized.
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n", __func__,
			      ret);
	}
}

/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}

struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}

/*
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}