/*
 * Collaborative memory management interface.
 *
 * Copyright (C) 2008 IBM Corporation
 * Author(s): Brian King (brking@linux.vnet.ibm.com),
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/stringify.h>
#include <linux/swap.h>
#include <linux/device.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <linux/memory.h>
#include <asm/plpar_wrappers.h>
#define CMM_DRIVER_VERSION	"1.0.0"
#define CMM_DEFAULT_DELAY	1
#define CMM_HOTPLUG_DELAY	5
#define CMM_DEBUG		0
#define CMM_DISABLE		0
#define CMM_OOM_KB		1024
#define CMM_MIN_MEM_MB		256
#define KB2PAGES(_p)		((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p)		((_p)<<(PAGE_SHIFT-10))
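/*
 * Illustrative conversion (assuming the common 4K page size, PAGE_SHIFT == 12,
 * so the shift amount is 2): KB2PAGES(1024) == 1024 >> 2 == 256 pages, and
 * PAGES2KB(256) == 256 << 2 == 1024 kB.
 */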
/*
 * The priority level tries to ensure that this notifier is called as
 * late as possible to reduce thrashing in the shared memory pool.
 */
#define CMM_MEM_HOTPLUG_PRI	1
#define CMM_MEM_ISOLATE_PRI	15
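/*
 * Notifier chains invoke higher-priority callbacks first, so the small
 * hotplug priority above places this callback near the end of the chain,
 * as the comment describes.
 */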
static unsigned int delay = CMM_DEFAULT_DELAY;
static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
static unsigned int oom_kb = CMM_OOM_KB;
static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
static struct device cmm_dev;
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
MODULE_LICENSE("GPL");
MODULE_VERSION(CMM_DRIVER_VERSION);
module_param_named(delay, delay, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
		 "[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
module_param_named(hotplug_delay, hotplug_delay, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(hotplug_delay, "Delay (in seconds) after memory hotplug remove "
		 "before loaning resumes. "
		 "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]");
module_param_named(oom_kb, oom_kb, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
		 "[Default=" __stringify(CMM_OOM_KB) "]");
module_param_named(min_mem_mb, min_mem_mb, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
		 "[Default=" __stringify(CMM_MIN_MEM_MB) "]");
module_param_named(debug, cmm_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable module debug logging. Set to 1 to enable. "
		 "[Default=" __stringify(CMM_DEBUG) "]");
#define CMM_NR_PAGES ((PAGE_SIZE - sizeof(void *) - sizeof(unsigned long)) / sizeof(unsigned long))
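/*
 * Sizing example (illustrative, assuming 4K pages and 8-byte longs/pointers):
 * CMM_NR_PAGES == (4096 - 8 - 8) / 8 == 510, i.e. each list page below tracks
 * up to 510 loaned page addresses in addition to its next pointer and index.
 */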
#define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }
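/*
 * Loaned pages are tracked in a singly linked list of page-sized arrays;
 * each node records the kernel addresses of pages currently loaned to the
 * hypervisor. The list and its counters are protected by cmm_lock below.
 */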
struct cmm_page_array {
	struct cmm_page_array *next;
	unsigned long index;
	unsigned long page[CMM_NR_PAGES];
};
static unsigned long loaned_pages;
static unsigned long loaned_pages_target;
static unsigned long oom_freed_pages;

static struct cmm_page_array *cmm_page_list;
static DEFINE_SPINLOCK(cmm_lock);

static DEFINE_MUTEX(hotplug_mutex);
static int hotplug_occurred;	/* protected by the hotplug mutex */

static struct task_struct *cmm_thread_ptr;
/**
 * cmm_alloc_pages - Allocate pages and mark them as loaned
 * @nr:	number of pages to allocate
 *
 * Return value:
 *	number of pages requested to be allocated which were not
 **/
static long cmm_alloc_pages(long nr)
{
	struct cmm_page_array *pa, *npa;
	unsigned long addr;
	long rc;

	cmm_dbg("Begin request for %ld pages\n", nr);

	while (nr) {
		/* Exit if a hotplug operation is in progress or occurred */
		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				mutex_unlock(&hotplug_mutex);
				break;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			break;
		}

		addr = __get_free_page(GFP_NOIO | __GFP_NOWARN |
				       __GFP_NORETRY | __GFP_NOMEMALLOC);
		if (!addr)
			break;
		spin_lock(&cmm_lock);
		pa = cmm_page_list;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
			spin_unlock(&cmm_lock);
			npa = (struct cmm_page_array *)__get_free_page(
					GFP_NOIO | __GFP_NOWARN |
					__GFP_NORETRY | __GFP_NOMEMALLOC);
			if (!npa) {
				pr_info("%s: Can not allocate new page list\n", __func__);
				free_page(addr);
				break;
			}
			spin_lock(&cmm_lock);
			pa = cmm_page_list;

			if (!pa || pa->index >= CMM_NR_PAGES) {
				npa->next = pa;
				npa->index = 0;
				pa = npa;
				cmm_page_list = pa;
			} else
				free_page((unsigned long) npa);
		}

		if ((rc = plpar_page_set_loaned(__pa(addr)))) {
			pr_err("%s: Can not set page to loaned. rc=%ld\n", __func__, rc);
			spin_unlock(&cmm_lock);
			free_page(addr);
			break;
		}

		pa->page[pa->index++] = addr;
		loaned_pages++;
		totalram_pages--;
		spin_unlock(&cmm_lock);
		nr--;
	}

	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
/**
 * cmm_free_pages - Free pages and mark them as active
 * @nr:	number of pages to free
 *
 * Return value:
 *	number of pages requested to be freed which were not
 **/
static long cmm_free_pages(long nr)
{
	struct cmm_page_array *pa;
	unsigned long addr;

	cmm_dbg("Begin free of %ld pages.\n", nr);
	spin_lock(&cmm_lock);
	pa = cmm_page_list;
	while (nr) {
		if (!pa || pa->index <= 0)
			break;
		addr = pa->page[--pa->index];

		if (pa->index == 0) {
			pa = pa->next;
			free_page((unsigned long) cmm_page_list);
			cmm_page_list = pa;
		}

		plpar_page_set_active(__pa(addr));
		free_page(addr);
		loaned_pages--;
		totalram_pages++;
		nr--;
	}
	spin_unlock(&cmm_lock);
	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
/**
 * cmm_oom_notify - OOM notifier
 * @self:	notifier block struct
 * @dummy:	not used
 * @parm:	returned - number of pages freed
 *
 * Return value:
 *	NOTIFY_OK
 **/
static int cmm_oom_notify(struct notifier_block *self,
			  unsigned long dummy, void *parm)
{
	unsigned long *freed = parm;
	long nr = KB2PAGES(oom_kb);

	cmm_dbg("OOM processing started\n");
	nr = cmm_free_pages(nr);
	loaned_pages_target = loaned_pages;
	*freed += KB2PAGES(oom_kb) - nr;
	oom_freed_pages += KB2PAGES(oom_kb) - nr;
	cmm_dbg("OOM processing complete\n");
	return NOTIFY_OK;
}
/**
 * cmm_get_mpp - Read memory performance parameters
 *
 * Makes hcall to query the current page loan request from the hypervisor.
 *
 * Return value:
 *	nothing
 **/
static void cmm_get_mpp(void)
{
	int rc;
	struct hvcall_mpp_data mpp_data;
	signed long active_pages_target, page_loan_request, target;
	signed long total_pages = totalram_pages + loaned_pages;
	signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;

	rc = h_get_mpp(&mpp_data);

	if (rc != H_SUCCESS)
		return;

	page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
	target = page_loan_request + (signed long)loaned_pages;

	if (target < 0 || total_pages < min_mem_pages)
		target = 0;

	if (target > oom_freed_pages)
		target -= oom_freed_pages;
	else
		target = 0;

	active_pages_target = total_pages - target;

	if (min_mem_pages > active_pages_target)
		target = total_pages - min_mem_pages;

	if (target < 0)
		target = 0;

	loaned_pages_target = target;

	cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
		page_loan_request, loaned_pages, loaned_pages_target,
		oom_freed_pages, totalram_pages);
}
static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify
};
/**
 * cmm_thread - CMM task thread
 * @dummy:	not used
 *
 * Return value:
 *	0
 **/
static int cmm_thread(void *dummy)
{
	unsigned long timeleft;

	while (1) {
		timeleft = msleep_interruptible(delay * 1000);

		if (kthread_should_stop() || timeleft)
			break;

		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				hotplug_occurred = 0;
				mutex_unlock(&hotplug_mutex);
				cmm_dbg("Hotplug operation has occurred, "
						"loaning activity suspended "
						"for %d seconds.\n",
						hotplug_delay);
				timeleft = msleep_interruptible(hotplug_delay *
						1000);
				if (kthread_should_stop() || timeleft)
					break;
				continue;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			cmm_dbg("Hotplug operation in progress, activity "
					"suspended\n");
			continue;
		}

		cmm_get_mpp();

		if (loaned_pages_target > loaned_pages) {
			if (cmm_alloc_pages(loaned_pages_target - loaned_pages))
				loaned_pages_target = loaned_pages;
		} else if (loaned_pages_target < loaned_pages)
			cmm_free_pages(loaned_pages - loaned_pages_target);
	}
	return 0;
}
#define CMM_SHOW(name, format, args...)				\
	static ssize_t show_##name(struct device *dev,		\
				   struct device_attribute *attr,	\
				   char *buf)			\
	{							\
		return sprintf(buf, format, ##args);		\
	}							\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
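/*
 * CMM_SHOW expands to a read-only show routine plus its DEVICE_ATTR, so the
 * two invocations below define the loaned_kb and loaned_target_kb sysfs
 * attributes without repeating the boilerplate.
 */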
CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
static ssize_t show_oom_pages(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
}
static ssize_t store_oom_pages(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned long val = simple_strtoul(buf, NULL, 10);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (val != 0)
		return -EBADMSG;

	oom_freed_pages = 0;
	return count;
}

static DEVICE_ATTR(oom_freed_kb, S_IWUSR | S_IRUGO,
		   show_oom_pages, store_oom_pages);
static struct device_attribute *cmm_attrs[] = {
	&dev_attr_loaned_kb,
	&dev_attr_loaned_target_kb,
	&dev_attr_oom_freed_kb,
};

static struct bus_type cmm_subsys = {
	.name = "cmm",
	.dev_name = "cmm",
};
/**
 * cmm_sysfs_register - Register with sysfs
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int cmm_sysfs_register(struct device *dev)
{
	int i, rc;

	if ((rc = subsys_system_register(&cmm_subsys, NULL)))
		return rc;

	dev->id = 0;
	dev->bus = &cmm_subsys;

	if ((rc = device_register(dev)))
		goto subsys_unregister;

	for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
		if ((rc = device_create_file(dev, cmm_attrs[i])))
			goto fail;
	}

	return 0;

fail:
	while (--i >= 0)
		device_remove_file(dev, cmm_attrs[i]);
	device_unregister(dev);
subsys_unregister:
	bus_unregister(&cmm_subsys);
	return rc;
}
/**
 * cmm_unregister_sysfs - Unregister from sysfs
 *
 **/
static void cmm_unregister_sysfs(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
		device_remove_file(dev, cmm_attrs[i]);
	device_unregister(dev);
	bus_unregister(&cmm_subsys);
}
/**
 * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
 *
 **/
static int cmm_reboot_notifier(struct notifier_block *nb,
			       unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		if (cmm_thread_ptr)
			kthread_stop(cmm_thread_ptr);
		cmm_thread_ptr = NULL;
		cmm_free_pages(loaned_pages);
	}
	return NOTIFY_DONE;
}

static struct notifier_block cmm_reboot_nb = {
	.notifier_call = cmm_reboot_notifier,
};
/**
 * cmm_count_pages - Count the number of pages loaned in a particular range.
 *
 * @arg: memory_isolate_notify structure with address range and count
 *
 * Return value:
 *	0 on success
 **/
static unsigned long cmm_count_pages(void *arg)
{
	struct memory_isolate_notify *marg = arg;
	struct cmm_page_array *pa;
	unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn);
	unsigned long end = start + (marg->nr_pages << PAGE_SHIFT);
	unsigned long idx;

	spin_lock(&cmm_lock);
	pa = cmm_page_list;
	while (pa) {
		if ((unsigned long)pa >= start && (unsigned long)pa < end)
			marg->pages_found++;
		for (idx = 0; idx < pa->index; idx++)
			if (pa->page[idx] >= start && pa->page[idx] < end)
				marg->pages_found++;
		pa = pa->next;
	}
	spin_unlock(&cmm_lock);
	return 0;
}
/**
 * cmm_memory_isolate_cb - Handle memory isolation notifier calls
 * @self:	notifier block struct
 * @action:	action to take
 * @arg:	struct memory_isolate_notify data for handler
 *
 * Return value:
 *	NOTIFY_OK or notifier error based on subfunction return value
 **/
static int cmm_memory_isolate_cb(struct notifier_block *self,
				 unsigned long action, void *arg)
{
	int ret = 0;

	if (action == MEM_ISOLATE_COUNT)
		ret = cmm_count_pages(arg);

	return notifier_from_errno(ret);
}

static struct notifier_block cmm_mem_isolate_nb = {
	.notifier_call = cmm_memory_isolate_cb,
	.priority = CMM_MEM_ISOLATE_PRI
};
/**
 * cmm_mem_going_offline - Unloan pages where memory is to be removed
 * @arg: memory_notify structure with page range to be offlined
 *
 * Return value:
 *	0 on success
 **/
static int cmm_mem_going_offline(void *arg)
{
	struct memory_notify *marg = arg;
	unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn);
	unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT);
	struct cmm_page_array *pa_curr, *pa_last, *npa;
	unsigned long idx;
	unsigned long freed = 0;

	cmm_dbg("Memory going offline, searching 0x%lx (%ld pages).\n",
			start_page, marg->nr_pages);
	spin_lock(&cmm_lock);

	/* Search the page list for pages in the range to be offlined */
	pa_last = pa_curr = cmm_page_list;
	while (pa_curr) {
		for (idx = (pa_curr->index - 1); (idx + 1) > 0; idx--) {
			if ((pa_curr->page[idx] < start_page) ||
			    (pa_curr->page[idx] >= end_page))
				continue;

			plpar_page_set_active(__pa(pa_curr->page[idx]));
			free_page(pa_curr->page[idx]);
			freed++;
			loaned_pages--;
			totalram_pages++;
			pa_curr->page[idx] = pa_last->page[--pa_last->index];
			if (pa_last->index == 0) {
				if (pa_curr == pa_last)
					pa_curr = pa_last->next;
				pa_last = pa_last->next;
				free_page((unsigned long)cmm_page_list);
				cmm_page_list = pa_last;
			}
		}
		pa_curr = pa_curr->next;
	}

	/* Search for page list structures in the range to be offlined */
	pa_last = NULL;
	pa_curr = cmm_page_list;
	while (pa_curr) {
		if (((unsigned long)pa_curr >= start_page) &&
				((unsigned long)pa_curr < end_page)) {
			npa = (struct cmm_page_array *)__get_free_page(
					GFP_NOIO | __GFP_NOWARN |
					__GFP_NORETRY | __GFP_NOMEMALLOC);
			if (!npa) {
				spin_unlock(&cmm_lock);
				cmm_dbg("Failed to allocate memory for list "
						"management. Memory hotplug "
						"failed.\n");
				return -ENOMEM;
			}
			memcpy(npa, pa_curr, PAGE_SIZE);
			if (pa_curr == cmm_page_list)
				cmm_page_list = npa;
			if (pa_last)
				pa_last->next = npa;
			free_page((unsigned long) pa_curr);
			freed++;
			pa_curr = npa;
		}

		pa_last = pa_curr;
		pa_curr = pa_curr->next;
	}

	spin_unlock(&cmm_lock);
	cmm_dbg("Released %ld pages in the search range.\n", freed);
	return 0;
}
/**
 * cmm_memory_cb - Handle memory hotplug notifier calls
 * @self:	notifier block struct
 * @action:	action to take
 * @arg:	struct memory_notify data for handler
 *
 * Return value:
 *	NOTIFY_OK or notifier error based on subfunction return value
 *
 **/
static int cmm_memory_cb(struct notifier_block *self,
			unsigned long action, void *arg)
{
	int ret = 0;

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&hotplug_mutex);
		hotplug_occurred = 1;
		ret = cmm_mem_going_offline(arg);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		mutex_unlock(&hotplug_mutex);
		cmm_dbg("Memory offline operation complete.\n");
		break;
	case MEM_GOING_ONLINE:
	case MEM_ONLINE:
	case MEM_CANCEL_ONLINE:
		break;
	}

	return notifier_from_errno(ret);
}

static struct notifier_block cmm_mem_nb = {
	.notifier_call = cmm_memory_cb,
	.priority = CMM_MEM_HOTPLUG_PRI
};
/**
 * cmm_init - Module initialization
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int cmm_init(void)
{
	int rc = -ENOMEM;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return -EOPNOTSUPP;

	if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
		return rc;

	if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
		goto out_oom_notifier;

	if ((rc = cmm_sysfs_register(&cmm_dev)))
		goto out_reboot_notifier;

	if (register_memory_notifier(&cmm_mem_nb) ||
	    register_memory_isolate_notifier(&cmm_mem_isolate_nb))
		goto out_unregister_notifier;

	if (cmm_disabled)
		return rc;

	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	if (IS_ERR(cmm_thread_ptr)) {
		rc = PTR_ERR(cmm_thread_ptr);
		goto out_unregister_notifier;
	}

	return rc;

out_unregister_notifier:
	unregister_memory_notifier(&cmm_mem_nb);
	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
	cmm_unregister_sysfs(&cmm_dev);
out_reboot_notifier:
	unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
	unregister_oom_notifier(&cmm_oom_nb);
	return rc;
}
/**
 * cmm_exit - Module exit
 *
 * Return value:
 *	nothing
 **/
static void cmm_exit(void)
{
	if (cmm_thread_ptr)
		kthread_stop(cmm_thread_ptr);
	unregister_oom_notifier(&cmm_oom_nb);
	unregister_reboot_notifier(&cmm_reboot_nb);
	unregister_memory_notifier(&cmm_mem_nb);
	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
	cmm_free_pages(loaned_pages);
	cmm_unregister_sysfs(&cmm_dev);
}
/**
 * cmm_set_disable - Disable/Enable CMM
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int cmm_set_disable(const char *val, struct kernel_param *kp)
{
	int disable = simple_strtoul(val, NULL, 10);

	if (disable != 0 && disable != 1)
		return -EINVAL;

	if (disable && !cmm_disabled) {
		if (cmm_thread_ptr)
			kthread_stop(cmm_thread_ptr);
		cmm_thread_ptr = NULL;
		cmm_free_pages(loaned_pages);
	} else if (!disable && cmm_disabled) {
		cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
		if (IS_ERR(cmm_thread_ptr))
			return PTR_ERR(cmm_thread_ptr);
	}

	cmm_disabled = disable;
	return 0;
}
module_param_call(disable, cmm_set_disable, param_get_uint,
		  &cmm_disabled, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable, "Disable CMM. Set to 1 to disable. "
		 "[Default=" __stringify(CMM_DISABLE) "]");
module_init(cmm_init);
module_exit(cmm_exit);