/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/stat.h>
#include <linux/slab.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(mem_sysfs_mutex);

#define MEMORY_CLASS_NAME	"memory"

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline int base_memory_block_id(int section_nr)
{
        return section_nr / sections_per_block;
}
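
/*
 * Worked example (illustrative; the actual numbers depend on the
 * architecture): with 128 MiB sections grouped into 2 GiB blocks,
 * sections_per_block is 16, so sections 0..15 map to block 0,
 * sections 16..31 to block 1, and base_memory_block_id(20) == 1.
 */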

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
        .name = MEMORY_CLASS_NAME,
        .dev_name = MEMORY_CLASS_NAME,
        .online = memory_subsys_online,
        .offline = memory_subsys_offline,
};

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
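
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver that wants to react to hotplug events registers a
 * notifier_block whose callback receives a struct memory_notify
 * describing the affected range. Returning NOTIFY_BAD from a
 * MEM_GOING_OFFLINE callback vetoes the offline.
 *
 *	static int foo_mem_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		struct memory_notify *mn = data;
 *
 *		switch (action) {
 *		case MEM_GOING_OFFLINE:
 *			// pfns [mn->start_pfn, mn->start_pfn + mn->nr_pages)
 *			// are about to go away; return NOTIFY_BAD to veto
 *			break;
 *		case MEM_ONLINE:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_mem_nb = {
 *		.notifier_call = foo_mem_notify,
 *	};
 *	...
 *	register_memory_notifier(&foo_mem_nb);
 */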

static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
        atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);

static void memory_block_release(struct device *dev)
{
        struct memory_block *mem = to_memory_block(dev);

        kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
        return MIN_MEMORY_BLOCK_SIZE;
}

static unsigned long get_memory_block_size(void)
{
        unsigned long block_sz;

        block_sz = memory_block_size_bytes();

        /* Validate blk_sz is a power of 2 and not less than section size */
        if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
                WARN_ON(1);
                block_sz = MIN_MEMORY_BLOCK_SIZE;
        }

        return block_sz;
}
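
/*
 * Worked example of the power-of-two test above: for block_sz ==
 * 0x8000000 (128 MiB), block_sz & (block_sz - 1) == 0x8000000 &
 * 0x7ffffff == 0, so the size is accepted. For a bogus block_sz ==
 * 0x6000000, 0x6000000 & 0x5ffffff == 0x4000000 != 0, and the size
 * falls back to MIN_MEMORY_BLOCK_SIZE.
 */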

/*
 * use this as the physical section index that this memsection
 * uses.
 */

static ssize_t show_mem_start_phys_index(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct memory_block *mem = to_memory_block(dev);
        unsigned long phys_index;

        phys_index = mem->start_section_nr / sections_per_block;
        return sprintf(buf, "%08lx\n", phys_index);
}

/*
 * Show whether the section of memory is likely to be hot-removable
 */
static ssize_t show_mem_removable(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        unsigned long i, pfn;
        int ret = 1;
        struct memory_block *mem = to_memory_block(dev);

        for (i = 0; i < sections_per_block; i++) {
                if (!present_section_nr(mem->start_section_nr + i))
                        continue;
                pfn = section_nr_to_pfn(mem->start_section_nr + i);
                ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
        }

        return sprintf(buf, "%d\n", ret);
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t show_mem_state(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct memory_block *mem = to_memory_block(dev);
        ssize_t len = 0;

        /*
         * We can probably put these states in a nice little array
         * so that they're not open-coded
         */
        switch (mem->state) {
        case MEM_ONLINE:
                len = sprintf(buf, "online\n");
                break;
        case MEM_OFFLINE:
                len = sprintf(buf, "offline\n");
                break;
        case MEM_GOING_OFFLINE:
                len = sprintf(buf, "going-offline\n");
                break;
        default:
                len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
                                mem->state);
                WARN_ON(1);
                break;
        }

        return len;
}

int memory_notify(unsigned long val, void *v)
{
        return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
        return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}

/*
 * The probe routines leave the pages reserved, just as the bootmem code does.
 * Make sure they're still that way.
 */
static bool pages_correctly_reserved(unsigned long start_pfn)
{
        int i, j;
        struct page *page;
        unsigned long pfn = start_pfn;

        /*
         * memmap between sections is not contiguous except with
         * SPARSEMEM_VMEMMAP. We lookup the page once per section
         * and assume memmap is contiguous within each section
         */
        for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
                if (WARN_ON_ONCE(!pfn_valid(pfn)))
                        return false;
                page = pfn_to_page(pfn);

                for (j = 0; j < PAGES_PER_SECTION; j++) {
                        if (PageReserved(page + j))
                                continue;

                        printk(KERN_WARNING "section number %ld page number %d "
                                "not reserved, was it already online?\n",
                                pfn_to_section_nr(pfn), j);

                        return false;
                }
        }

        return true;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 * Must already be protected by mem_hotplug_begin().
 */
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
{
        unsigned long start_pfn;
        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
        int ret;

        start_pfn = section_nr_to_pfn(phys_index);

        switch (action) {
        case MEM_ONLINE:
                if (!pages_correctly_reserved(start_pfn))
                        return -EBUSY;

                ret = online_pages(start_pfn, nr_pages, online_type);
                break;
        case MEM_OFFLINE:
                ret = offline_pages(start_pfn, nr_pages);
                break;
        default:
                WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
                     "%ld\n", __func__, phys_index, action, action);
                ret = -EINVAL;
        }

        return ret;
}

static int memory_block_change_state(struct memory_block *mem,
                unsigned long to_state, unsigned long from_state_req)
{
        int ret = 0;

        if (mem->state != from_state_req)
                return -EINVAL;

        if (to_state == MEM_OFFLINE)
                mem->state = MEM_GOING_OFFLINE;

        ret = memory_block_action(mem->start_section_nr, to_state,
                                mem->online_type);

        mem->state = ret ? from_state_req : to_state;

        return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
        struct memory_block *mem = to_memory_block(dev);
        int ret;

        if (mem->state == MEM_ONLINE)
                return 0;

        /*
         * If we are called from store_mem_state(), online_type will be
         * set >= 0. Otherwise we were called from the device online
         * attribute and need to set the online_type.
         */
        if (mem->online_type < 0)
                mem->online_type = MMOP_ONLINE_KEEP;

        /* Already under protection of mem_hotplug_begin() */
        ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);

        /* clear online_type */
        mem->online_type = -1;

        return ret;
}

static int memory_subsys_offline(struct device *dev)
{
        struct memory_block *mem = to_memory_block(dev);

        if (mem->state == MEM_OFFLINE)
                return 0;

        /* Can't offline block with non-present sections */
        if (mem->section_count != sections_per_block)
                return -EINVAL;

        return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t
store_mem_state(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct memory_block *mem = to_memory_block(dev);
        int ret, online_type;

        ret = lock_device_hotplug_sysfs();
        if (ret)
                return ret;

        if (sysfs_streq(buf, "online_kernel"))
                online_type = MMOP_ONLINE_KERNEL;
        else if (sysfs_streq(buf, "online_movable"))
                online_type = MMOP_ONLINE_MOVABLE;
        else if (sysfs_streq(buf, "online"))
                online_type = MMOP_ONLINE_KEEP;
        else if (sysfs_streq(buf, "offline"))
                online_type = MMOP_OFFLINE;
        else {
                ret = -EINVAL;
                goto err;
        }

        /*
         * Memory hotplug needs to hold mem_hotplug_begin() for probe to find
         * the correct memory block to online before doing device_online(dev),
         * which will take dev->mutex. Take the lock early to prevent an
         * inversion; the memory_subsys_online() callback is implemented
         * assuming the lock is already held.
         */
        mem_hotplug_begin();

        switch (online_type) {
        case MMOP_ONLINE_KERNEL:
        case MMOP_ONLINE_MOVABLE:
        case MMOP_ONLINE_KEEP:
                mem->online_type = online_type;
                ret = device_online(&mem->dev);
                break;
        case MMOP_OFFLINE:
                ret = device_offline(&mem->dev);
                break;
        default:
                ret = -EINVAL; /* should never happen */
        }

        mem_hotplug_done();
err:
        unlock_device_hotplug();

        if (ret < 0)
                return ret;
        if (ret)
                return -EINVAL;

        return count;
}
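
/*
 * Usage sketch (illustrative, userspace): the state file accepts the
 * strings parsed above. A minimal C program to online block 32,
 * assuming such a block exists on the running system:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/devices/system/memory/memory32/state",
 *			      O_WRONLY);
 *		if (fd < 0)
 *			return 1;
 *		// "online_kernel" or "online_movable" instead would
 *		// pick the target zone explicitly
 *		write(fd, "online", 6);
 *		close(fd);
 *		return 0;
 *	}
 */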

/*
 * phys_device is a bad name for this. What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or fru.
 * i.e. do these ranges belong to the same physical device,
 * s.t. if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t show_phys_device(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct memory_block *mem = to_memory_block(dev);
        return sprintf(buf, "%d\n", mem->phys_device);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static ssize_t show_valid_zones(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct memory_block *mem = to_memory_block(dev);
        unsigned long start_pfn, end_pfn;
        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
        struct page *first_page;
        struct zone *zone;
        int zone_shift = 0;

        start_pfn = section_nr_to_pfn(mem->start_section_nr);
        end_pfn = start_pfn + nr_pages;
        first_page = pfn_to_page(start_pfn);

        /* A block that spans more than one zone can not be offlined. */
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return sprintf(buf, "none\n");

        zone = page_zone(first_page);

        /* MMOP_ONLINE_KEEP */
        sprintf(buf, "%s", zone->name);

        /* MMOP_ONLINE_KERNEL */
        zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }

        /* MMOP_ONLINE_MOVABLE */
        zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }

        strcat(buf, "\n");

        return strlen(buf);
}
static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
#endif

static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);

/*
 * Block size attribute stuff
 */
static ssize_t
print_block_size(struct device *dev, struct device_attribute *attr,
                 char *buf)
{
        return sprintf(buf, "%lx\n", get_memory_block_size());
}

static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
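
/*
 * Example (illustrative): userspace can map a physical address to its
 * memory block by dividing by block_size_bytes. With the common
 * 0x8000000 (128 MiB) block size, address 0x100000000 (4 GiB) falls
 * in block 0x100000000 / 0x8000000 == 32, i.e. .../memory/memory32.
 */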

/*
 * Memory auto online policy.
 */

static ssize_t
show_auto_online_blocks(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        if (memhp_auto_online)
                return sprintf(buf, "online\n");
        else
                return sprintf(buf, "offline\n");
}

static ssize_t
store_auto_online_blocks(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        if (sysfs_streq(buf, "online"))
                memhp_auto_online = true;
        else if (sysfs_streq(buf, "offline"))
                memhp_auto_online = false;
        else
                return -EINVAL;

        return count;
}

static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks,
                   store_auto_online_blocks);

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
memory_probe_store(struct device *dev, struct device_attribute *attr,
                   const char *buf, size_t count)
{
        u64 phys_addr;
        int nid, ret;
        unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

        ret = kstrtoull(buf, 0, &phys_addr);
        if (ret)
                return ret;

        if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
                return -EINVAL;

        nid = memory_add_physaddr_to_nid(phys_addr);
        ret = add_memory(nid, phys_addr,
                         MIN_MEMORY_BLOCK_SIZE * sections_per_block);

        if (ret)
                goto out;

        ret = count;
out:
        return ret;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
#endif
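
/*
 * Worked example of the alignment check above (illustrative): with
 * 128 MiB blocks, (pages_per_block << PAGE_SHIFT) - 1 == 0x7ffffff,
 * so a probed address must be a multiple of 0x8000000. Writing
 * "0x10000000" to the probe file is accepted; "0x10001000" fails
 * with -EINVAL because 0x10001000 & 0x7ffffff != 0.
 */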

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t
store_soft_offline_page(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
        int ret;
        u64 pfn;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (kstrtoull(buf, 0, &pfn) < 0)
                return -EINVAL;
        pfn >>= PAGE_SHIFT;
        if (!pfn_valid(pfn))
                return -ENXIO;
        ret = soft_offline_page(pfn_to_page(pfn), 0);
        return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t
store_hard_offline_page(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
        int ret;
        u64 pfn;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (kstrtoull(buf, 0, &pfn) < 0)
                return -EINVAL;
        pfn >>= PAGE_SHIFT;
        ret = memory_failure(pfn, 0, 0);
        return ret ? ret : count;
}

static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
#endif
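
/*
 * Usage note (illustrative): despite the variable name, userspace
 * writes a byte-granular physical address to these files, which the
 * stores above shift down by PAGE_SHIFT to get the pfn. With 4 KiB
 * pages, writing "0x12345000" to soft_offline_page acts on pfn
 * 0x12345.
 */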

/*
 * Note that phys_device is optional. It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
        return 0;
}

/*
 * A reference for the returned object is held and the reference for the
 * hinted object is released.
 */
struct memory_block *find_memory_block_hinted(struct mem_section *section,
                                              struct memory_block *hint)
{
        int block_id = base_memory_block_id(__section_nr(section));
        struct device *hintdev = hint ? &hint->dev : NULL;
        struct device *dev;

        dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
        if (hint)
                put_device(&hint->dev);
        if (!dev)
                return NULL;
        return to_memory_block(dev);
}

/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index. If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all device subsystems.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
        return find_memory_block_hinted(section, NULL);
}

static struct attribute *memory_memblk_attrs[] = {
        &dev_attr_phys_index.attr,
        &dev_attr_state.attr,
        &dev_attr_phys_device.attr,
        &dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
        &dev_attr_valid_zones.attr,
#endif
        NULL
};

static struct attribute_group memory_memblk_attr_group = {
        .attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
        &memory_memblk_attr_group,
        NULL,
};

/*
 * register_memory - Setup a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
        memory->dev.bus = &memory_subsys;
        memory->dev.id = memory->start_section_nr / sections_per_block;
        memory->dev.release = memory_block_release;
        memory->dev.groups = memory_memblk_attr_groups;
        memory->dev.offline = memory->state == MEM_OFFLINE;

        return device_register(&memory->dev);
}

static int init_memory_block(struct memory_block **memory,
                             struct mem_section *section, unsigned long state)
{
        struct memory_block *mem;
        unsigned long start_pfn;
        int scn_nr;
        int ret = 0;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        scn_nr = __section_nr(section);
        mem->start_section_nr =
                        base_memory_block_id(scn_nr) * sections_per_block;
        mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
        mem->state = state;
        start_pfn = section_nr_to_pfn(mem->start_section_nr);
        mem->phys_device = arch_get_memory_phys_device(start_pfn);

        ret = register_memory(mem);

        *memory = mem;
        return ret;
}

static int add_memory_block(int base_section_nr)
{
        struct memory_block *mem;
        int i, ret, section_count = 0, section_nr;

        for (i = base_section_nr;
             (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
             i++) {
                if (!present_section_nr(i))
                        continue;
                if (section_count == 0)
                        section_nr = i;
                section_count++;
        }

        if (section_count == 0)
                return 0;
        ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
        if (ret)
                return ret;
        mem->section_count = section_count;
        return 0;
}

static bool is_zone_device_section(struct mem_section *ms)
{
        struct page *page;

        page = sparse_decode_mem_map(ms->section_mem_map, __section_nr(ms));
        return is_zone_device_page(page);
}

/*
 * need an interface for the VM to add new memory regions,
 * but without onlining it.
 */
int register_new_memory(int nid, struct mem_section *section)
{
        int ret = 0;
        struct memory_block *mem;

        if (is_zone_device_section(section))
                return 0;

        mutex_lock(&mem_sysfs_mutex);

        mem = find_memory_block(section);
        if (mem) {
                mem->section_count++;
                put_device(&mem->dev);
        } else {
                ret = init_memory_block(&mem, section, MEM_OFFLINE);
                if (ret)
                        goto out;
                mem->section_count++;
        }

        if (mem->section_count == sections_per_block)
                ret = register_mem_sect_under_node(mem, nid);
out:
        mutex_unlock(&mem_sysfs_mutex);
        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void
unregister_memory(struct memory_block *memory)
{
        BUG_ON(memory->dev.bus != &memory_subsys);

        /* drop the ref. we got in remove_memory_block() */
        put_device(&memory->dev);
        device_unregister(&memory->dev);
}

static int remove_memory_section(unsigned long node_id,
                                 struct mem_section *section, int phys_device)
{
        struct memory_block *mem;

        if (is_zone_device_section(section))
                return 0;

        mutex_lock(&mem_sysfs_mutex);
        mem = find_memory_block(section);
        unregister_mem_sect_under_nodes(mem, __section_nr(section));

        mem->section_count--;
        if (mem->section_count == 0)
                unregister_memory(mem);
        else
                put_device(&mem->dev);

        mutex_unlock(&mem_sysfs_mutex);
        return 0;
}

int unregister_memory_section(struct mem_section *section)
{
        if (!present_section(section))
                return -EINVAL;

        return remove_memory_section(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
        return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
        &dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
        &dev_attr_soft_offline_page.attr,
        &dev_attr_hard_offline_page.attr,
#endif

        &dev_attr_block_size_bytes.attr,
        &dev_attr_auto_online_blocks.attr,
        NULL
};

static struct attribute_group memory_root_attr_group = {
        .attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
        &memory_root_attr_group,
        NULL,
};

/*
 * Initialize the sysfs support for memory devices...
 */
int __init memory_dev_init(void)
{
        unsigned int i;
        int ret;
        int err;
        unsigned long block_sz;

        ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
        if (ret)
                goto out;

        block_sz = get_memory_block_size();
        sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

        /*
         * Create entries for memory sections that were found
         * during boot and have been initialized
         */
        mutex_lock(&mem_sysfs_mutex);
        for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
                err = add_memory_block(i);
                if (!ret)
                        ret = err;
        }
        mutex_unlock(&mem_sysfs_mutex);

out:
        if (ret)
                printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
        return ret;
}