/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>

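/*
 * Page states are passed to the ESSA instruction ("extract and set
 * storage attributes", opcode 0xb9ab) as an order code: "stable" tells
 * the host that the page content must be preserved, "unused" that the
 * content may be discarded.
 */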
#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2

static int cmma_flag = 1;

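/*
 * The "cmma=" kernel parameter controls the collaborative memory
 * management assist: "no"/"off" disables page hinting, "yes"/"on"
 * (the default) enables it.
 */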
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

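/*
 * Execute ESSA once to check whether the machine provides it. If the
 * instruction is not available the resulting exception is caught by
 * the fixup and rc is left at -EOPNOTSUPP.
 */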
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	asm volatile(
		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	return rc;
}

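/* Disable page hinting at boot if the machine does not provide ESSA. */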
void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa())
		cmma_flag = 0;
}

static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unstable(page, order);
}

static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable(page, order);
}

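/*
 * Walk the free lists of all populated zones and set every free page
 * to either the stable or the unstable state in one pass.
 */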
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}