/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif

#ifdef CONFIG_CLEANCACHE
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);
static bool selfballooning __read_mostly = true;
module_param(selfballooning, bool, S_IRUGO);
#endif /* CONFIG_CLEANCACHE */

#ifdef CONFIG_FRONTSWAP
static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);
#else /* CONFIG_FRONTSWAP */
#define frontswap (0)
#endif /* CONFIG_FRONTSWAP */

#ifdef CONFIG_XEN_SELFBALLOONING
static bool selfshrinking __read_mostly = true;
module_param(selfshrinking, bool, S_IRUGO);
#endif /* CONFIG_XEN_SELFBALLOONING */
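
/*
 * Usage note (added for clarity; not in the original source): built in,
 * the driver stays dormant unless the kernel is booted with "tmem" on
 * the command line; built as a module, <xen/tmem.h> defines tmem_enabled
 * as true, so loading the module is itself the opt-in.  The parameters
 * above are read-only at runtime (S_IRUGO) but can be given at boot or
 * load time, e.g. "tmem.cleancache=0" to keep cleancache off while
 * leaving frontswap on (assuming the object is built as tmem.o, which
 * yields the "tmem." parameter prefix).
 */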

#define TMEM_CONTROL		0
#define TMEM_NEW_POOL		1
#define TMEM_DESTROY_POOL	2
#define TMEM_NEW_PAGE		3
#define TMEM_PUT_PAGE		4
#define TMEM_GET_PAGE		5
#define TMEM_FLUSH_PAGE		6
#define TMEM_FLUSH_OBJECT	7
#define TMEM_READ		8
#define TMEM_WRITE		9
#define TMEM_XCHG		10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST		1
#define TMEM_POOL_SHARED		2
#define TMEM_POOL_PAGESIZE_SHIFT	4
#define TMEM_VERSION_SHIFT		24

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/*
 * TMEM_POOL_PERSIST and TMEM_POOL_SHARED above double as the flags for
 * tmem_ops.new_pool, so they are not redefined here.
 */

/* xen tmem foundation ops/hypercalls */

static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
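
/*
 * Illustrative call (not in the original source): a whole-page put of
 * page P at index 3 of pool 5 under object id oid becomes
 *
 *	xen_tmem_op(TMEM_PUT_PAGE, 5, oid, 3, xen_page_to_gfn(P), 0, 0, 0);
 *
 * tmem_offset, pfn_offset and len stay 0 because this driver only moves
 * whole pages; gmfn carries the page's guest frame number.
 */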

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
				u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
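
/*
 * Worked example (added for clarity): with a 4 KiB page size, the loop
 * shifts pagesize == 4096 == 1 << 12 down to 1, so pageshift == 12 and
 * the encoded size nibble (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT
 * is 0; the spec version from <xen/tmem.h> occupies bits 24 and up.
 */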

/* xen generic tmem ops */

static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, struct page *page)
{
	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
			   xen_page_to_gfn(page), 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, struct page *page)
{
	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
			   xen_page_to_gfn(page), 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index, 0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}
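
/*
 * Note (added for clarity): the put/get wrappers pass the page's guest
 * frame number so the hypervisor can copy page data to or from tmem;
 * the flush wrappers pass 0 since no page data changes hands.
 */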

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, page);
}
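
/*
 * Note (added for clarity): reinterpreting the cleancache_filekey as a
 * struct tmem_oid, as above, is safe only because the two types are the
 * same size; xen_tmem_init() enforces this with a BUILD_BUG_ON.
 */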

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, page);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}
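
/*
 * Note (added for clarity): cleancache pools are created without
 * TMEM_POOL_PERSIST, i.e. as ephemeral pools: the hypervisor may drop
 * their pages at any time, which is acceptable because cleancache only
 * holds clean pagecache copies that can be refetched from disk.
 */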

static const struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };

	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
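
/*
 * Worked example (added for clarity): with SWIZ_BITS == 4, swap type 2
 * at offset 0xabcd stores under oid.oid[0] == _oswiz(2, 0xabcd) ==
 * (2 << 4) | 0xd == 0x2d with tmem index iswiz(0xabcd) == 0xabc, so
 * consecutive swap offsets fan out across 16 tmem objects per type.
 */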

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
			       struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}
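
/*
 * Note (added for clarity): walking ind from SWIZ_MASK down to 0 flushes
 * all 1 << SWIZ_BITS == 16 swizzled objects that oswiz() can scatter a
 * swap type's pages across, so the whole area is invalidated.
 */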

static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
		    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}
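
/*
 * Note (added for clarity): unlike the cleancache pools, the frontswap
 * pool is created with TMEM_POOL_PERSIST, since swapped-out pages must
 * be retained until explicitly flushed; it is created lazily, on the
 * first swapon after xen_tmem_init() resets the poolid to -1.
 */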

static struct frontswap_ops tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#endif

static int __init xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && frontswap) {
		char *s = "";

		tmem_frontswap_poolid = -1;
		frontswap_register_ops(&tmem_frontswap_ops);
		pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory%s\n",
			s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUILD_BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && cleancache) {
		int err;

		err = cleancache_register_ops(&tmem_cleancache_ops);
		if (err)
			pr_warn("xen-tmem: failed to enable cleancache: %d\n",
				err);
		else
			pr_info("cleancache enabled, RAM provided by Xen Transcendent Memory\n");
	}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
	/*
	 * There is no point in driving pages to the swap system if they
	 * aren't going anywhere in the tmem universe.
	 */
	if (!frontswap) {
		selfshrinking = false;
		selfballooning = false;
	}
	xen_selfballoon_init(selfballooning, selfshrinking);
#endif
	return 0;
}

module_init(xen_tmem_init)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");