/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zbud.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
/*********************************
* statistics
**********************************/
/* Number of memory pages used by the compressed pool */
static u64 zswap_pool_pages;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
*/

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;
/*********************************
* tunables
**********************************/
/* Enable/disable zswap (disabled by default, fixed at boot for now) */
static bool zswap_enabled __read_mostly;
module_param_named(enabled, zswap_enabled, bool, 0);

/* Compressor to be used by zswap (fixed at boot for now) */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
module_param_named(compressor, zswap_compressor, charp, 0);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent,
		zswap_max_pool_percent, uint, 0644);
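
/*
 * Illustrative usage (hypothetical values, not defaults): because the
 * parameters above belong to the "zswap" module, they can be set on the
 * kernel command line when zswap is built in, e.g.:
 *
 *   zswap.enabled=1 zswap.compressor=lzo zswap.max_pool_percent=25
 *
 * max_pool_percent (perm 0644) is also writable at runtime via
 * /sys/module/zswap/parameters/max_pool_percent; enabled and compressor
 * use perm 0, so they get no sysfs entry and are fixed at boot.
 */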
/*********************************
* compression functions
**********************************/
/* per-cpu compression transforms */
static struct crypto_comp * __percpu *zswap_comp_pcpu_tfms;

enum comp_op {
	ZSWAP_COMPOP_COMPRESS,
	ZSWAP_COMPOP_DECOMPRESS
};

static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu());
	switch (op) {
	case ZSWAP_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZSWAP_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	default:
		ret = -EINVAL;
	}

	put_cpu();
	return ret;
}
static int __init zswap_comp_init(void)
{
	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
		pr_info("%s compressor not available\n", zswap_compressor);
		/* fall back to default compressor */
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
		if (!crypto_has_comp(zswap_compressor, 0, 0))
			/* can't even load the default compressor */
			return -ENODEV;
	}
	pr_info("using %s compressor\n", zswap_compressor);

	/* alloc percpu transforms */
	zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
	if (!zswap_comp_pcpu_tfms)
		return -ENOMEM;
	return 0;
}
static void zswap_comp_exit(void)
{
	/* free percpu transforms */
	if (zswap_comp_pcpu_tfms)
		free_percpu(zswap_comp_pcpu_tfms);
}
/*********************************
* data structures
**********************************/
/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * handle - zbud allocation handle that stores the compressed page data
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	unsigned long handle;
};

struct zswap_header {
	swp_entry_t swpentry;
};
/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
	struct zbud_pool *pool;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
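
/*
 * Lookup sketch: a swp_entry_t encodes a (type, offset) pair; swp_type()
 * selects the per-swap-device tree in zswap_trees[] and swp_offset() is
 * the rbtree key.  The same pattern appears in zswap_writeback_entry()
 * below:
 *
 *	tree = zswap_trees[swp_type(swpentry)];
 *	entry = zswap_rb_search(&tree->rbroot, swp_offset(swpentry));
 */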
/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return (zswap_entry_cache == NULL);
}

static void zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}
/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock */
static int zswap_entry_put(struct zswap_entry *entry)
{
	entry->refcount--;
	return entry->refcount;
}
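
/*
 * Refcount lifecycle, pieced together from the callers below: an entry is
 * created by zswap_frontswap_store() with refcount == 1 (the initial
 * reference owned by the rbtree).  Load and writeback take a temporary
 * reference while they touch the entry outside the tree lock; invalidate
 * and writeback drop the initial reference.  Whoever observes the count
 * reach zero (or below, in the racy writeback cases) frees the entry via
 * zswap_free_entry().
 */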
/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}
/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}
/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu)
{
	struct crypto_comp *tfm;
	u8 *dst;

	switch (action) {
	case CPU_UP_PREPARE:
		tfm = crypto_alloc_comp(zswap_compressor, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("can't allocate compressor transform\n");
			return NOTIFY_BAD;
		}
		*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm;
		dst = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
		if (!dst) {
			pr_err("can't allocate compressor buffer\n");
			crypto_free_comp(tfm);
			*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
			return NOTIFY_BAD;
		}
		per_cpu(zswap_dstmem, cpu) = dst;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu);
		if (tfm) {
			crypto_free_comp(tfm);
			*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
		}
		dst = per_cpu(zswap_dstmem, cpu);
		kfree(dst);
		per_cpu(zswap_dstmem, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static int zswap_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *pcpu)
{
	unsigned long cpu = (unsigned long)pcpu;
	return __zswap_cpu_notifier(action, cpu);
}

static struct notifier_block zswap_cpu_notifier_block = {
	.notifier_call = zswap_cpu_notifier
};
static int zswap_cpu_init(void)
{
	unsigned long cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
			goto cleanup;
	register_cpu_notifier(&zswap_cpu_notifier_block);
	put_online_cpus();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
	put_online_cpus();
	return -ENOMEM;
}
/*********************************
* helpers
**********************************/
static bool zswap_is_full(void)
{
	return (totalram_pages * zswap_max_pool_percent / 100 <
		zswap_pool_pages);
}
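
/*
 * Worked example (hypothetical machine): with 4 KiB pages, 1 GiB of RAM
 * gives totalram_pages == 262144; at the default max_pool_percent of 20,
 * the pool is considered full once zswap_pool_pages exceeds
 * 262144 * 20 / 100 == 52428 pages, i.e. roughly 204 MiB of pool memory.
 */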
/*
 * Carries out the common pattern of freeing an entry's zbud allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_tree *tree, struct zswap_entry *entry)
{
	zbud_free(tree->pool, entry->handle);
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_pool_pages = zbud_get_pool_size(tree->pool);
}
/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};
/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;

	*retpage = NULL;
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page(GFP_KERNEL);
			if (!new_page)
				break; /* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) { /* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) { /* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			lru_cache_add_anon(new_page);
			*retpage = new_page;
			return ZSWAP_SWAPCACHE_NEW;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	if (!found_page)
		return ZSWAP_SWAPCACHE_FAIL;
	*retpage = found_page;
	return ZSWAP_SWAPCACHE_EXIST;
}
/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	u8 *src, *dst;
	unsigned int dlen;
	int ret, refcount;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zbud_map(pool, handle);
	swpentry = zhdr->swpentry; /* here */
	zbud_unmap(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);
	BUG_ON(pool != tree->pool);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	zswap_entry_get(entry);
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		page_cache_release(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zbud_map(tree->pool, entry->handle) +
			sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
				entry->length, dst, &dlen);
		kunmap_atomic(dst);
		zbud_unmap(tree->pool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	page_cache_release(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);

	/* drop local reference */
	zswap_entry_put(entry);
	/* drop the initial reference from entry creation */
	refcount = zswap_entry_put(entry);

	/*
	 * There are three possible values for refcount here:
	 * (1) refcount is 1, load is in progress, unlink from rbtree,
	 *     load will free
	 * (2) refcount is 0, (normal case) entry is valid,
	 *     remove from rbtree and free entry
	 * (3) refcount is -1, invalidate happened during writeback,
	 *     free entry
	 */
	if (refcount >= 0) {
		/* no invalidate yet, remove from rbtree */
		rb_erase(&entry->rbnode, &tree->rbroot);
	}
	spin_unlock(&tree->lock);
	if (refcount <= 0) {
		/* free the entry */
		zswap_free_entry(tree, entry);
		return 0;
	}
	return -EAGAIN;

fail:
	spin_lock(&tree->lock);
	refcount = zswap_entry_put(entry);
	if (refcount <= 0) {
		/* invalidate happened, consider writeback as success */
		zswap_free_entry(tree, entry);
		ret = 0;
	}
	spin_unlock(&tree->lock);
	return ret;
}
/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zbud_reclaim_page(tree->pool, 8)) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	src = kmap_atomic(page);
	ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	if (ret) {
		ret = -EINVAL;
		goto freepage;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zbud_alloc(tree->pool, len, __GFP_NORETRY | __GFP_NOWARN,
		&handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto freepage;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto freepage;
	}
	zhdr = zbud_map(tree->pool, handle);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zbud_unmap(tree->pool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			rb_erase(&dupentry->rbnode, &tree->rbroot);
			if (!zswap_entry_put(dupentry)) {
				/* free */
				zswap_free_entry(tree, dupentry);
			}
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_pool_pages = zbud_get_pool_size(tree->pool);

	return 0;

freepage:
	put_cpu_var(zswap_dstmem);
	zswap_entry_cache_free(entry);
reject:
	return ret;
}
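
/*
 * Note on the duplicate-store loop above: frontswap may store to an offset
 * that already has a zswap entry (e.g. the same page swapped out again
 * without an intervening invalidate).  The loop counts the event in
 * zswap_duplicate_entry, unlinks the stale entry (freeing it unless a
 * concurrent writeback still holds a reference), and retries the insert.
 */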
/*
 * returns 0 if the page was successfully decompressed
 * returns -1 on entry not found or error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	u8 *src, *dst;
	unsigned int dlen;
	int refcount, ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	zswap_entry_get(entry);
	spin_unlock(&tree->lock);

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zbud_map(tree->pool, entry->handle) +
			sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
		dst, &dlen);
	kunmap_atomic(dst);
	zbud_unmap(tree->pool, entry->handle);
	BUG_ON(ret);

	spin_lock(&tree->lock);
	refcount = zswap_entry_put(entry);
	if (likely(refcount)) {
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);

	/*
	 * We don't have to unlink from the rbtree because
	 * zswap_writeback_entry() or zswap_frontswap_invalidate_page()
	 * has already done this for us if we are the last reference.
	 */
	/* free */
	zswap_free_entry(tree, entry);

	return 0;
}
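
/*
 * When load returns -1 because the entry was already written back, the
 * frontswap layer falls through to the normal swap-in path, which reads
 * the page from the swap device where zswap_writeback_entry() placed it.
 */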
/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	int refcount;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	rb_erase(&entry->rbnode, &tree->rbroot);

	/* drop the initial reference from entry creation */
	refcount = zswap_entry_put(entry);

	spin_unlock(&tree->lock);

	if (refcount) {
		/* writeback in progress, writeback will free */
		return;
	}

	/* free */
	zswap_free_entry(tree, entry);
}
/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
		zbud_free(tree->pool, entry->handle);
		zswap_entry_cache_free(entry);
		atomic_dec(&zswap_stored_pages);
	}
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);

	zbud_destroy_pool(tree->pool);
	kfree(tree);
	zswap_trees[type] = NULL;
}
static struct zbud_ops zswap_zbud_ops = {
	.evict = zswap_writeback_entry
};

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
	if (!tree)
		goto err;
	tree->pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
	if (!tree->pool)
		goto freetree;
	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
	return;

freetree:
	kfree(tree);
err:
	pr_err("alloc failed, zswap disabled for swap type %d\n", type);
}

static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};
/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
	if (!zswap_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("pool_limit_hit", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", S_IRUGO,
			zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_pages);
	debugfs_create_atomic_t("stored_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}
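
/*
 * Usage sketch: with CONFIG_DEBUG_FS enabled and debugfs mounted at the
 * conventional location, the read-only counters created above can be
 * inspected at runtime, e.g.:
 *
 *   # cat /sys/kernel/debug/zswap/pool_pages
 *   # cat /sys/kernel/debug/zswap/stored_pages
 */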
static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif
/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	if (!zswap_enabled)
		return 0;

	pr_info("loading zswap\n");
	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto error;
	}
	if (zswap_comp_init()) {
		pr_err("compressor initialization failed\n");
		goto compfail;
	}
	if (zswap_cpu_init()) {
		pr_err("per-cpu initialization failed\n");
		goto pcpufail;
	}
	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;
pcpufail:
	zswap_comp_exit();
compfail:
	zswap_entry_cache_destroy();
error:
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Compressed cache for swap pages");