drivers/base/memory.c

/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/stat.h>
#include <linux/slab.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

static DEFINE_MUTEX(mem_sysfs_mutex);

#define MEMORY_CLASS_NAME	"memory"

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline int base_memory_block_id(int section_nr)
{
	return section_nr / sections_per_block;
}

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);

static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}

static unsigned long get_memory_block_size(void)
{
	unsigned long block_sz;

	block_sz = memory_block_size_bytes();

	/* Validate blk_sz is a power of 2 and not less than section size */
	if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
		WARN_ON(1);
		block_sz = MIN_MEMORY_BLOCK_SIZE;
	}

	return block_sz;
}

/*
 * use this as the physical section index that this memsection
 * uses.
 */

static ssize_t show_mem_start_phys_index(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}

/*
 * Show whether the section of memory is likely to be hot-removable
 */
static ssize_t show_mem_removable(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	unsigned long i, pfn;
	int ret = 1;
	struct memory_block *mem = to_memory_block(dev);

	for (i = 0; i < sections_per_block; i++) {
		if (!present_section_nr(mem->start_section_nr + i))
			continue;
		pfn = section_nr_to_pfn(mem->start_section_nr + i);
		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
	}

	return sprintf(buf, "%d\n", ret);
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t show_mem_state(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
				mem->state);
		WARN_ON(1);
		break;
	}

	return len;
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}

/*
 * The probe routines leave the pages reserved, just as the bootmem code does.
 * Make sure they're still that way.
 */
static bool pages_correctly_reserved(unsigned long start_pfn)
{
	int i, j;
	struct page *page;
	unsigned long pfn = start_pfn;

	/*
	 * memmap between sections is not contiguous except with
	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
	 * and assume memmap is contiguous within each section
	 */
	for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
		if (WARN_ON_ONCE(!pfn_valid(pfn)))
			return false;
		page = pfn_to_page(pfn);

		for (j = 0; j < PAGES_PER_SECTION; j++) {
			if (PageReserved(page + j))
				continue;

			printk(KERN_WARNING "section number %ld page number %d "
				"not reserved, was it already online?\n",
				pfn_to_section_nr(pfn), j);

			return false;
		}
	}

	return true;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 * Must already be protected by mem_hotplug_begin().
 */
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct page *first_page;
	int ret;

	start_pfn = section_nr_to_pfn(phys_index);
	first_page = pfn_to_page(start_pfn);

	switch (action) {
	case MEM_ONLINE:
		if (!pages_correctly_reserved(start_pfn))
			return -EBUSY;

		ret = online_pages(start_pfn, nr_pages, online_type);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, phys_index, action, action);
		ret = -EINVAL;
	}

	return ret;
}

int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state,
				mem->online_type);

	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * If we are called from store_mem_state(), online_type will be
	 * set >= 0. Otherwise we were called from the device online
	 * attribute and need to set the online_type.
	 */
	if (mem->online_type < 0)
		mem->online_type = MMOP_ONLINE_KEEP;

	/* Already under protection of mem_hotplug_begin() */
	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);

	/* clear online_type */
	mem->online_type = -1;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	/* Can't offline block with non-present sections */
	if (mem->section_count != sections_per_block)
		return -EINVAL;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t
store_mem_state(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret, online_type;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (sysfs_streq(buf, "online_kernel"))
		online_type = MMOP_ONLINE_KERNEL;
	else if (sysfs_streq(buf, "online_movable"))
		online_type = MMOP_ONLINE_MOVABLE;
	else if (sysfs_streq(buf, "online"))
		online_type = MMOP_ONLINE_KEEP;
	else if (sysfs_streq(buf, "offline"))
		online_type = MMOP_OFFLINE;
	else {
		ret = -EINVAL;
		goto err;
	}

	/*
	 * Memory hotplug needs to hold mem_hotplug_begin() for probe to find
	 * the correct memory block to online before doing device_online(dev),
	 * which will take dev->mutex. Take the lock early to prevent an
	 * inversion; the memory_subsys_online() callback is implemented
	 * assuming the lock is already held.
	 */
	mem_hotplug_begin();

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE_KEEP:
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	mem_hotplug_done();
err:
	unlock_device_hotplug();

	if (ret)
		return ret;
	return count;
}

/*
 * phys_device is a bad name for this. What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or FRU.
 * i.e. do these ranges belong to the same physical device,
 * such that if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t show_phys_device(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	return sprintf(buf, "%d\n", mem->phys_device);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static ssize_t show_valid_zones(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn, end_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct page *first_page;
	struct zone *zone;

	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	end_pfn = start_pfn + nr_pages;
	first_page = pfn_to_page(start_pfn);

	/* A block that contains more than one zone can not be offlined. */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return sprintf(buf, "none\n");

	zone = page_zone(first_page);

	if (zone_idx(zone) == ZONE_MOVABLE - 1) {
		/* The mem block is the last memory block of this zone. */
		if (end_pfn == zone_end_pfn(zone))
			return sprintf(buf, "%s %s\n",
					zone->name, (zone + 1)->name);
	}

	if (zone_idx(zone) == ZONE_MOVABLE) {
		/* The mem block is the first memory block of ZONE_MOVABLE. */
		if (start_pfn == zone->zone_start_pfn)
			return sprintf(buf, "%s %s\n",
					zone->name, (zone - 1)->name);
	}

	return sprintf(buf, "%s\n", zone->name);
}
static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
#endif

static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
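
/*
 * Illustrative usage (not part of the driver): the attributes above surface
 * each block under /sys/devices/system/memory/memoryN/, where "memory" is
 * MEMORY_CLASS_NAME and N is the device id assigned in register_memory().
 * A minimal sketch, assuming a block number N that exists on the running
 * system:
 *
 *	# cat /sys/devices/system/memory/memoryN/state
 *	# echo online > /sys/devices/system/memory/memoryN/state
 *	# echo offline > /sys/devices/system/memory/memoryN/state
 *
 * store_mem_state() also accepts "online_kernel" and "online_movable",
 * which map to MMOP_ONLINE_KERNEL and MMOP_ONLINE_MOVABLE above.
 */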

/*
 * Block size attribute stuff
 */
static ssize_t
print_block_size(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	return sprintf(buf, "%lx\n", get_memory_block_size());
}

static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);

/*
 * Memory auto online policy.
 */

static ssize_t
show_auto_online_blocks(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	if (memhp_auto_online)
		return sprintf(buf, "online\n");
	else
		return sprintf(buf, "offline\n");
}

static ssize_t
store_auto_online_blocks(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	if (sysfs_streq(buf, "online"))
		memhp_auto_online = true;
	else if (sysfs_streq(buf, "offline"))
		memhp_auto_online = false;
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks,
		   store_auto_online_blocks);
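
/*
 * Illustrative usage (not part of the driver): auto_online_blocks is a root
 * attribute of the subsystem, so it appears directly under
 * /sys/devices/system/memory/.  A minimal sketch:
 *
 *	# cat /sys/devices/system/memory/auto_online_blocks
 *	# echo online > /sys/devices/system/memory/auto_online_blocks
 *
 * With the policy set to "online", newly hot-added blocks are onlined
 * without waiting for userspace to write their per-block state files.
 */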

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
memory_probe_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = add_memory(nid, phys_addr,
			 MIN_MEMORY_BLOCK_SIZE * sections_per_block);

	if (ret)
		goto out;

	ret = count;
out:
	return ret;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
#endif
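
/*
 * Illustrative usage (not part of the driver): on architectures that select
 * CONFIG_ARCH_MEMORY_PROBE, a block can be hot-added by writing its starting
 * physical address to the probe file.  A minimal sketch, with 0x100000000
 * standing in for a real, block-aligned address:
 *
 *	# echo 0x100000000 > /sys/devices/system/memory/probe
 *
 * memory_probe_store() rejects addresses that are not aligned to the memory
 * block size (sections_per_block sections).
 */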

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t
store_soft_offline_page(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return -ENXIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t
store_hard_offline_page(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
#endif
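
/*
 * Illustrative usage (not part of the driver): with CONFIG_MEMORY_FAILURE,
 * the two attributes above sit under /sys/devices/system/memory/ and take a
 * physical address, which the store routines shift down to a pfn.  A minimal
 * sketch, with 0x12345000 standing in for a real address:
 *
 *	# echo 0x12345000 > /sys/devices/system/memory/soft_offline_page
 *	# echo 0x12345000 > /sys/devices/system/memory/hard_offline_page
 *
 * Soft offlining migrates the page's contents away first; hard offlining
 * behaves like a real memory error and may kill processes mapping the page.
 */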

/*
 * Note that phys_device is optional. It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned object is held and the reference for the
 * hinted object is released.
 */
struct memory_block *find_memory_block_hinted(struct mem_section *section,
					      struct memory_block *hint)
{
	int block_id = base_memory_block_id(__section_nr(section));
	struct device *hintdev = hint ? &hint->dev : NULL;
	struct device *dev;

	dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
	if (hint)
		put_device(&hint->dev);
	if (!dev)
		return NULL;
	return to_memory_block(dev);
}

/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index. If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all device subsystems.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	return find_memory_block_hinted(section, NULL);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

/*
 * register_memory - Setup a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	return device_register(&memory->dev);
}

static int init_memory_block(struct memory_block **memory,
			     struct mem_section *section, unsigned long state)
{
	struct memory_block *mem;
	unsigned long start_pfn;
	int scn_nr;
	int ret = 0;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	scn_nr = __section_nr(section);
	mem->start_section_nr =
			base_memory_block_id(scn_nr) * sections_per_block;
	mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
	mem->state = state;
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);

	ret = register_memory(mem);

	*memory = mem;
	return ret;
}

static int add_memory_block(int base_section_nr)
{
	struct memory_block *mem;
	int i, ret, section_count = 0, section_nr;

	for (i = base_section_nr;
	     (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
	     i++) {
		if (!present_section_nr(i))
			continue;
		if (section_count == 0)
			section_nr = i;
		section_count++;
	}

	if (section_count == 0)
		return 0;
	ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
	if (ret)
		return ret;
	mem->section_count = section_count;
	return 0;
}

static bool is_zone_device_section(struct mem_section *ms)
{
	struct page *page;

	page = sparse_decode_mem_map(ms->section_mem_map, __section_nr(ms));
	return is_zone_device_page(page);
}

/*
 * need an interface for the VM to add new memory regions,
 * but without onlining it.
 */
int register_new_memory(int nid, struct mem_section *section)
{
	int ret = 0;
	struct memory_block *mem;

	if (is_zone_device_section(section))
		return 0;

	mutex_lock(&mem_sysfs_mutex);

	mem = find_memory_block(section);
	if (mem) {
		mem->section_count++;
		put_device(&mem->dev);
	} else {
		ret = init_memory_block(&mem, section, MEM_OFFLINE);
		if (ret)
			goto out;
		mem->section_count++;
	}

	if (mem->section_count == sections_per_block)
		ret = register_mem_sect_under_node(mem, nid);
out:
	mutex_unlock(&mem_sysfs_mutex);
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void
unregister_memory(struct memory_block *memory)
{
	BUG_ON(memory->dev.bus != &memory_subsys);

	/* drop the ref. we got in remove_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

static int remove_memory_section(unsigned long node_id,
				 struct mem_section *section, int phys_device)
{
	struct memory_block *mem;

	if (is_zone_device_section(section))
		return 0;

	mutex_lock(&mem_sysfs_mutex);
	mem = find_memory_block(section);
	unregister_mem_sect_under_nodes(mem, __section_nr(section));

	mem->section_count--;
	if (mem->section_count == 0)
		unregister_memory(mem);
	else
		put_device(&mem->dev);

	mutex_unlock(&mem_sysfs_mutex);
	return 0;
}

int unregister_memory_section(struct mem_section *section)
{
	if (!present_section(section))
		return -EINVAL;

	return remove_memory_section(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices...
 */
int __init memory_dev_init(void)
{
	unsigned int i;
	int ret;
	int err;
	unsigned long block_sz;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		goto out;

	block_sz = get_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
	mutex_lock(&mem_sysfs_mutex);
	for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
		err = add_memory_block(i);
		if (!ret)
			ret = err;
	}
	mutex_unlock(&mem_sysfs_mutex);

out:
	if (ret)
		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
	return ret;
}