/*
 * Frontswap frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of frontswap.  See
 * Documentation/vm/frontswap.txt for more information.
 *
 * Copyright (C) 2009-2012 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
/*
 * frontswap_ops are added by frontswap_register_ops, and provide the
 * frontswap "backend" implementation functions.  Multiple implementations
 * may be registered, but implementations can never deregister.  This
 * is a simple singly-linked list of all registered implementations.
 */
static struct frontswap_ops *frontswap_ops __read_mostly;
#define for_each_frontswap_ops(ops)		\
	for ((ops) = frontswap_ops; (ops); (ops) = (ops)->next)
/*
 * If enabled, frontswap_store will return failure even on success.  As
 * a result, the swap subsystem will always write the page to swap, in
 * effect converting frontswap into a writethrough cache.  In this mode,
 * there is no direct reduction in swap writes, but a frontswap backend
 * can unilaterally "reclaim" any pages in use with no data loss, thus
 * providing increased control over maximum memory usage due to frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;
/*
 * If enabled, the underlying tmem implementation is capable of doing
 * exclusive gets, so frontswap_load, on a successful tmem_get, must
 * mark the page as no longer in frontswap AND mark it dirty.
 */
static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;
#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured).  These are for information only so are not protected
 * against increment races.
 */
static u64 frontswap_loads;
static u64 frontswap_succ_stores;
static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;
static inline void inc_frontswap_loads(void) {
	frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void) {
	frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void) {
	frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void) {
	frontswap_invalidates++;
}
#else
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif
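
/*
 * Usage note: with debugfs mounted, the counters above can be read
 * directly from userspace, e.g.:
 *
 *	# cat /sys/kernel/debug/frontswap/succ_stores
 */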
/*
 * Due to the asynchronous nature of the backends loading potentially
 * _after_ the swap system has been activated, we have chokepoints
 * on all frontswap functions to not call the backend until the backend
 * has registered.
 *
 * This would not guard us against the user deciding to call swapoff right as
 * we are calling the backend to initialize (so swapon is in action).
 * Fortunately for us, the swapon_mutex has been taken by the callee so we are
 * OK. The other scenario where calls to frontswap_store (called via
 * swap_writepage) race with frontswap_invalidate_area (called via
 * swapoff) is again guarded by the swap subsystem.
 *
 * While no backend is registered all calls to frontswap_[store|load|
 * invalidate_area|invalidate_page] are ignored or fail.
 *
 * The time between the backend being registered and the swap file system
 * calling the backend (via the frontswap_* functions) is indeterminate as
 * frontswap_ops is not atomic_t (or a value guarded by a spinlock).
 * That is OK as we are comfortable missing some of these calls to the newly
 * registered backend.
 *
 * Obviously the opposite (unloading the backend) must be done after all
 * the frontswap_[store|load|invalidate_area|invalidate_page] start
 * ignoring or failing the requests.  However, there is currently no way
 * to unload a backend once it is registered.
 */
/*
 * Register operations for frontswap
 */
void frontswap_register_ops(struct frontswap_ops *ops)
{
	DECLARE_BITMAP(a, MAX_SWAPFILES);
	DECLARE_BITMAP(b, MAX_SWAPFILES);
	struct swap_info_struct *si;
	unsigned int i;

	bitmap_zero(a, MAX_SWAPFILES);
	bitmap_zero(b, MAX_SWAPFILES);

	spin_lock(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list) {
		if (!WARN_ON(!si->frontswap_map))
			set_bit(si->type, a);
	}
	spin_unlock(&swap_lock);

	/* the new ops needs to know the currently active swap devices */
	for_each_set_bit(i, a, MAX_SWAPFILES)
		ops->init(i);

	/*
	 * Setting frontswap_ops must happen after the ops->init() calls
	 * above; cmpxchg implies smp_mb() which will ensure the init is
	 * complete at this point.
	 */
	do {
		ops->next = frontswap_ops;
	} while (cmpxchg(&frontswap_ops, ops->next, ops) != ops->next);

	spin_lock(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list) {
		if (si->frontswap_map)
			set_bit(si->type, b);
	}
	spin_unlock(&swap_lock);

	/*
	 * On the very unlikely chance that a swap device was added or
	 * removed between setting the "a" list bits and the ops init
	 * calls, we re-check and do init or invalidate for any changed
	 * bits.
	 */
	if (unlikely(!bitmap_equal(a, b, MAX_SWAPFILES))) {
		for (i = 0; i < MAX_SWAPFILES; i++) {
			if (!test_bit(i, a) && test_bit(i, b))
				ops->init(i);
			else if (test_bit(i, a) && !test_bit(i, b))
				ops->invalidate_area(i);
		}
	}
}
EXPORT_SYMBOL(frontswap_register_ops);
/*
 * Enable/disable frontswap writethrough (see above).
 */
void frontswap_writethrough(bool enable)
{
	frontswap_writethrough_enabled = enable;
}
EXPORT_SYMBOL(frontswap_writethrough);
/*
 * Enable/disable frontswap exclusive gets (see above).
 */
void frontswap_tmem_exclusive_gets(bool enable)
{
	frontswap_tmem_exclusive_gets_enabled = enable;
}
EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
/*
 * Called when a swap device is swapon'd.
 */
void __frontswap_init(unsigned type, unsigned long *map)
{
	struct swap_info_struct *sis = swap_info[type];
	struct frontswap_ops *ops;

	BUG_ON(sis == NULL);

	/*
	 * p->frontswap is a bitmap that we MUST have to figure out which page
	 * has gone in frontswap. Without it there is no point in continuing.
	 */
	if (WARN_ON(!map))
		return;

	/*
	 * Regardless of whether the frontswap backend has been loaded
	 * before this function or it will be later, we _MUST_ have the
	 * p->frontswap set to something valid to work properly.
	 */
	frontswap_map_set(sis, map);

	for_each_frontswap_ops(ops)
		ops->init(type);
}
EXPORT_SYMBOL(__frontswap_init);
bool __frontswap_test(struct swap_info_struct *sis,
		      pgoff_t offset)
{
	if (sis->frontswap_map)
		return test_bit(offset, sis->frontswap_map);
	return false;
}
EXPORT_SYMBOL(__frontswap_test);
static inline void __frontswap_set(struct swap_info_struct *sis,
				   pgoff_t offset)
{
	set_bit(offset, sis->frontswap_map);
	atomic_inc(&sis->frontswap_pages);
}
static inline void __frontswap_clear(struct swap_info_struct *sis,
				     pgoff_t offset)
{
	clear_bit(offset, sis->frontswap_map);
	atomic_dec(&sis->frontswap_pages);
}
236 * "Store" data from a page to frontswap and associate it with the page's
237 * swaptype and offset. Page must be locked and in the swap cache.
238 * If frontswap already contains a page with matching swaptype and
239 * offset, the frontswap implementation may either overwrite the data and
240 * return success or invalidate the page from frontswap and return failure.
242 int __frontswap_store(struct page
*page
)
245 swp_entry_t entry
= { .val
= page_private(page
), };
246 int type
= swp_type(entry
);
247 struct swap_info_struct
*sis
= swap_info
[type
];
248 pgoff_t offset
= swp_offset(entry
);
249 struct frontswap_ops
*ops
;
252 * Return if no backend registed.
253 * Don't need to inc frontswap_failed_stores here.
258 BUG_ON(!PageLocked(page
));
262 * If a dup, we must remove the old page first; we can't leave the
263 * old page no matter if the store of the new page succeeds or fails,
264 * and we can't rely on the new page replacing the old page as we may
265 * not store to the same implementation that contains the old page.
267 if (__frontswap_test(sis
, offset
)) {
268 __frontswap_clear(sis
, offset
);
269 for_each_frontswap_ops(ops
)
270 ops
->invalidate_page(type
, offset
);
273 /* Try to store in each implementation, until one succeeds. */
274 for_each_frontswap_ops(ops
) {
275 ret
= ops
->store(type
, offset
, page
);
276 if (!ret
) /* successful store */
280 __frontswap_set(sis
, offset
);
281 inc_frontswap_succ_stores();
283 inc_frontswap_failed_stores();
285 if (frontswap_writethrough_enabled
)
286 /* report failure so swap also writes to swap device */
290 EXPORT_SYMBOL(__frontswap_store
);
293 * "Get" data from frontswap associated with swaptype and offset that were
294 * specified when the data was put to frontswap and use it to fill the
295 * specified page with data. Page must be locked and in the swap cache.
297 int __frontswap_load(struct page
*page
)
300 swp_entry_t entry
= { .val
= page_private(page
), };
301 int type
= swp_type(entry
);
302 struct swap_info_struct
*sis
= swap_info
[type
];
303 pgoff_t offset
= swp_offset(entry
);
304 struct frontswap_ops
*ops
;
309 BUG_ON(!PageLocked(page
));
311 if (!__frontswap_test(sis
, offset
))
314 /* Try loading from each implementation, until one succeeds. */
315 for_each_frontswap_ops(ops
) {
316 ret
= ops
->load(type
, offset
, page
);
317 if (!ret
) /* successful load */
321 inc_frontswap_loads();
322 if (frontswap_tmem_exclusive_gets_enabled
) {
324 __frontswap_clear(sis
, offset
);
329 EXPORT_SYMBOL(__frontswap_load
);
/*
 * Invalidate any data from frontswap associated with the specified swaptype
 * and offset so that a subsequent "get" will fail.
 */
void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct swap_info_struct *sis = swap_info[type];
	struct frontswap_ops *ops;

	BUG_ON(sis == NULL);

	if (!__frontswap_test(sis, offset))
		return;

	for_each_frontswap_ops(ops)
		ops->invalidate_page(type, offset);
	__frontswap_clear(sis, offset);
	inc_frontswap_invalidates();
}
EXPORT_SYMBOL(__frontswap_invalidate_page);
/*
 * Invalidate all data from frontswap associated with all offsets for the
 * specified swaptype.
 */
void __frontswap_invalidate_area(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];
	struct frontswap_ops *ops;

	BUG_ON(sis == NULL);

	if (sis->frontswap_map == NULL)
		return;

	for_each_frontswap_ops(ops)
		ops->invalidate_area(type);
	atomic_set(&sis->frontswap_pages, 0);
	bitmap_zero(sis->frontswap_map, sis->max);
}
EXPORT_SYMBOL(__frontswap_invalidate_area);
static unsigned long __frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;
	struct swap_info_struct *si = NULL;

	assert_spin_locked(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list)
		totalpages += atomic_read(&si->frontswap_pages);
	return totalpages;
}
static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
				   int *swapid)
{
	int ret = -EINVAL;
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages_to_unuse = total;
	unsigned long pages = 0, pages_to_unuse = 0;

	assert_spin_locked(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list) {
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages)) {
			ret = -ENOMEM;
			continue;
		}
		vm_unacct_memory(pages);
		*unused = pages_to_unuse;
		*swapid = si->type;
		ret = 0;
		break;
	}

	return ret;
}
/*
 * Used to check if it's necessary and feasible to unuse pages.
 * Return 1 when nothing to do, 0 when need to shrink pages,
 * error code when there is an error.
 */
static int __frontswap_shrink(unsigned long target_pages,
			      unsigned long *pages_to_unuse,
			      int *type)
{
	unsigned long total_pages = 0, total_pages_to_unuse;

	assert_spin_locked(&swap_lock);

	total_pages = __frontswap_curr_pages();
	if (total_pages <= target_pages) {
		/* Nothing to do */
		*pages_to_unuse = 0;
		return 1;
	}
	total_pages_to_unuse = total_pages - target_pages;
	return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
}
/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to attempt to
 * unuse enough frontswap pages to -- subject to memory
 * constraints -- reduce the number of pages in frontswap to the
 * number given in the parameter target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	unsigned long pages_to_unuse = 0;
	int uninitialized_var(type), ret;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart scan from swap_active_head each time
	 */
	spin_lock(&swap_lock);
	ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
	spin_unlock(&swap_lock);
	if (ret == 0)
		try_to_unuse(type, true, pages_to_unuse);
	return;
}
EXPORT_SYMBOL(frontswap_shrink);
/*
 * Count and return the number of frontswap pages across all
 * swap devices.  This is exported so that backend drivers can
 * determine current usage without reading debugfs.
 */
unsigned long frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;

	spin_lock(&swap_lock);
	totalpages = __frontswap_curr_pages();
	spin_unlock(&swap_lock);

	return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);
static int __init init_frontswap(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("frontswap", NULL);

	if (root == NULL)
		return -ENXIO;
	debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
	debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
	debugfs_create_u64("failed_stores", S_IRUGO, root,
			   &frontswap_failed_stores);
	debugfs_create_u64("invalidates", S_IRUGO,
			   root, &frontswap_invalidates);
#endif
	return 0;
}

module_init(init_frontswap);