/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */
/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;
/*********************************
* tunables
**********************************/

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static struct kernel_param_ops zswap_enabled_param_ops = {
	.set =		zswap_enabled_param_set,
	.get =		param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
	.set =		zswap_compressor_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);
/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
	.set =		zswap_zpool_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
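
/*
 * Usage example (illustrative, not part of the original file): the
 * tunables above can be set on the kernel command line or changed at
 * runtime through sysfs, e.g.
 *
 *	zswap.enabled=1 zswap.compressor=lzo zswap.zpool=zbud
 *	echo 30 > /sys/module/zswap/parameters/max_pool_percent
 */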
/*********************************
* data structures
**********************************/

struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct work_struct work;
	struct notifier_block notifier;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};
/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          writeback
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	unsigned long handle;
};
struct zswap_header {
	swp_entry_t swpentry;
};
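
/*
 * Layout note (added for exposition): each zpool allocation written by
 * the store path consists of a struct zswap_header immediately followed
 * by the compressed page data, so readers skip
 * sizeof(struct zswap_header) bytes (see the (zhdr + 1) and
 * zpool_map_handle() arithmetic below).
 */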
/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/* fatal error during init */
static bool zswap_init_failed;
/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};
static bool zswap_is_full(void)
{
	return totalram_pages * zswap_max_pool_percent / 100 <
		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
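
/*
 * Worked example (illustrative numbers, not from the original file):
 * on a machine with 2097152 total RAM pages (8 GiB with 4 KiB pages)
 * and the default max_pool_percent of 20, the pool is considered full
 * once the compressed storage exceeds 2097152 * 20 / 100 = 419430
 * pages, i.e. roughly 1.6 GiB.
 */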
static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}
/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}
static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}
/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}
/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}
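
/*
 * Illustrative caller pattern (a sketch added for exposition; the real
 * caller is zswap_frontswap_store() below): insert under the tree lock
 * and, on -EEXIST, evict the duplicate and retry.
 *
 *	do {
 *		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
 *		if (ret == -EEXIST) {
 *			zswap_rb_erase(&tree->rbroot, dupentry);
 *			zswap_entry_put(tree, dupentry);
 *		}
 *	} while (ret == -EEXIST);
 */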
static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}
/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}
/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
* remove from the tree and free it, if nobody references the entry
*/
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}
/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}
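
/*
 * Illustrative usage of the refcount helpers above (a sketch of the
 * pattern used by the load and writeback paths below, not a function
 * from the original file): look up and ref the entry under the tree
 * lock, drop the lock while using it, then retake the lock to put it.
 *
 *	spin_lock(&tree->lock);
 *	entry = zswap_entry_find_get(&tree->rbroot, offset);
 *	spin_unlock(&tree->lock);
 *	if (entry) {
 *		...use entry->handle and entry->length...
 *		spin_lock(&tree->lock);
 *		zswap_entry_put(tree, entry);
 *		spin_unlock(&tree->lock);
 *	}
 */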
/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
{
	u8 *dst;

	switch (action) {
	case CPU_UP_PREPARE:
		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
		if (!dst) {
			pr_err("can't allocate compressor buffer\n");
			return NOTIFY_BAD;
		}
		per_cpu(zswap_dstmem, cpu) = dst;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		dst = per_cpu(zswap_dstmem, cpu);
		kfree(dst);
		per_cpu(zswap_dstmem, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
				     unsigned long action, void *pcpu)
{
	return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
}

static struct notifier_block zswap_dstmem_notifier = {
	.notifier_call =	zswap_cpu_dstmem_notifier,
};
static int __init zswap_cpu_dstmem_init(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
		    NOTIFY_BAD)
			goto cleanup;
	__register_cpu_notifier(&zswap_dstmem_notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}
static void zswap_cpu_dstmem_destroy(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&zswap_dstmem_notifier);
	cpu_notifier_register_done();
}
static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
				     unsigned long action, unsigned long cpu)
{
	struct crypto_comp *tfm;

	switch (action) {
	case CPU_UP_PREPARE:
		if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
			break;
		tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
		if (IS_ERR_OR_NULL(tfm)) {
			pr_err("could not alloc crypto comp %s : %ld\n",
			       pool->tfm_name, PTR_ERR(tfm));
			return NOTIFY_BAD;
		}
		*per_cpu_ptr(pool->tfm, cpu) = tfm;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		tfm = *per_cpu_ptr(pool->tfm, cpu);
		if (!IS_ERR_OR_NULL(tfm))
			crypto_free_comp(tfm);
		*per_cpu_ptr(pool->tfm, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static int zswap_cpu_comp_notifier(struct notifier_block *nb,
				   unsigned long action, void *pcpu)
{
	unsigned long cpu = (unsigned long)pcpu;
	struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);

	return __zswap_cpu_comp_notifier(pool, action, cpu);
}
static int zswap_cpu_comp_init(struct zswap_pool *pool)
{
	unsigned long cpu;

	memset(&pool->notifier, 0, sizeof(pool->notifier));
	pool->notifier.notifier_call = zswap_cpu_comp_notifier;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
		    NOTIFY_BAD)
			goto cleanup;
	__register_cpu_notifier(&pool->notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}
static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&pool->notifier);
	cpu_notifier_register_done();
}
/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ON(!pool);

	return pool;
}
static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}
static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!pool || !zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}
static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	if (!WARN_ON(!last) && !zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}
/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}
static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		pr_err("pool alloc failed\n");
		return NULL;
	}

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	if (zswap_cpu_comp_init(pool))
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}
static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
		if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
			pr_err("default compressor %s not available\n",
			       zswap_compressor);
			return NULL;
		}
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
	}
	if (!zpool_has_pool(zswap_zpool_type)) {
		if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
			pr_err("default zpool %s not available\n",
			       zswap_zpool_type);
			return NULL;
		}
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
	}

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}
static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	zswap_cpu_comp_destroy(pool);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	return kref_get_unless_zero(&pool->kref);
}
static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool), work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}
static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->work, __zswap_pool_release);
	schedule_work(&pool->work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}
/*********************************
* param callbacks
**********************************/

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret;

	if (zswap_init_failed) {
		pr_err("can't set param, initialization failed\n");
		return -ENODEV;
	}

	/* no change required */
	if (!strcmp(s, *(char **)kp->arg))
		return 0;

	/* if this is load-time (pre-init) param setting,
	 * don't create a pool; that's done during init.
	 */
	if (!zswap_init_started)
		return param_set_charp(s, kp);

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_comp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}
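
/*
 * Example (illustrative; mirrors the documented zswap usage): the
 * compressor and zpool can be switched at runtime, e.g.
 *
 *	echo lz4 > /sys/module/zswap/parameters/compressor
 *	echo zsmalloc > /sys/module/zswap/parameters/zpool
 *
 * A matching pool is reused or a new one created and made current; the
 * old pool's reference is dropped here, so it is destroyed once its
 * last entry is loaded, invalidated, or written back.
 */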
static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}
static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	if (zswap_init_failed) {
		pr_err("can't enable, initialization failed\n");
		return -ENODEV;
	}
	return param_set_bool(val, kp);
}
/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};
/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}
/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	put_page(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for entry here:
	 * (1) refcount is 1 (normal case), entry is valid and on the tree
	 * (2) refcount is 0, entry is freed and not on the tree
	 *     because invalidate happened during writeback
	 * search the tree and free the entry if found
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * if we get here due to ZSWAP_SWAPCACHE_EXIST,
	 * a load may be happening concurrently;
	 * it is safe and okay to not free the entry,
	 * and if we do free the entry in the following put,
	 * it is also okay to return !0
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}
static int zswap_shrink(void)
{
	struct zswap_pool *pool;
	int ret;

	pool = zswap_pool_last_get();
	if (!pool)
		return -ENOENT;

	ret = zpool_shrink(pool->zpool, 1, NULL);

	zswap_pool_put(pool);

	return ret;
}
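
/*
 * Note (added for clarity): zpool_shrink() drives eviction through the
 * zswap_zpool_ops registered earlier, so each handle the allocator
 * chooses to reclaim is handed to zswap_writeback_entry() above.
 */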
/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zswap_shrink()) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}

		/* A second zswap_is_full() check after
		 * zswap_shrink() to make sure it's now
		 * under the max_pool_percent
		 */
		if (zswap_is_full()) {
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(entry->pool->zpool, len,
			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
			   &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}
/*
 * Returns 0 if the page was successfully decompressed,
 * or -1 if the entry was not found or an error occurred.
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
			ZPOOL_MM_RO) + sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	BUG_ON(ret);

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}
/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}
/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}
static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}
static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};
/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
	if (!zswap_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("pool_limit_hit", S_IRUGO,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", S_IRUGO,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", S_IRUGO,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", S_IRUGO,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", S_IRUGO,
				zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}
static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif
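
/*
 * Usage example (illustrative, assumes debugfs is mounted at the usual
 * location): the counters above can be read at runtime, e.g.
 *
 *	cat /sys/kernel/debug/zswap/pool_total_size
 *	cat /sys/kernel/debug/zswap/stored_pages
 *
 * The ratio pool_total_size / (stored_pages * PAGE_SIZE) gives the
 * fraction of the original size that the compressed pool occupies.
 */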
/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	if (zswap_cpu_dstmem_init()) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	pool = __zswap_pool_create_fallback();
	if (!pool) {
		pr_err("pool creation failed\n");
		goto pool_fail;
	}
	pr_info("loaded using pool %s/%s\n", pool->tfm_name,
		zpool_get_type(pool->zpool));

	list_add(&pool->list, &zswap_pools);

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

pool_fail:
	zswap_cpu_dstmem_destroy();
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_failed = true;
	zswap_enabled = false;
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");