/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 *	Released under the terms of 3-clause BSD License
 *	Released under the terms of GNU General Public License Version 2.0
 *
 * This allocator is designed for use with zram. Thus, the allocator is
 * supposed to work well under low memory conditions. In particular, it
 * never attempts higher order page allocation, which is very likely to
 * fail under memory pressure. On the other hand, if we just use single
 * (0-order) pages, it would suffer from very high fragmentation --
 * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
 * This was one of the major issues with its predecessor (xvmalloc).
 *
 * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
 * and links them together using various 'struct page' fields. These linked
 * pages act as a single higher-order page, i.e. an object can span 0-order
 * page boundaries. The code refers to these linked pages as a single entity
 * called a zspage.
 *
 * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE,
 * since this satisfies the requirements of all its current users (in the
 * worst case, a page is incompressible and is thus stored "as-is", i.e. in
 * uncompressed form). For allocation requests larger than this size, failure
 * is returned (see zs_malloc).
 *
 * Additionally, zs_malloc() does not return a dereferenceable pointer.
 * Instead, it returns an opaque handle (unsigned long) which encodes the
 * actual location of the allocated object. The reason for this indirection
 * is that zsmalloc does not keep zspages permanently mapped, since that
 * would cause issues on 32-bit systems where the VA region for kernel
 * space mappings is very small. So, before using the allocated memory,
 * the object has to be mapped using zs_map_object() to get a usable pointer
 * and subsequently unmapped using zs_unmap_object().
 *
 * Following is how we use various fields and flags of the underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 *	For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->pages_per_zspage * PAGE_SIZE / class->size)
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 */
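
/*
 * Illustrative usage sketch (not part of this file's build; the pool name,
 * gfp flags and buffer names below are arbitrary). A typical user such as
 * zram drives the API as:
 *
 *	struct zs_pool *pool = zs_create_pool("example", GFP_NOIO);
 *	unsigned long handle = zs_malloc(pool, len);
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 *
 * The pointer returned by zs_map_object() is only valid until the matching
 * zs_unmap_object(); the opaque handle itself stays valid until zs_free().
 */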
#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
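
/*
 * Worked example of the above (assuming a 64-bit system with 4K pages,
 * i.e. MAX_PHYSMEM_BITS == BITS_PER_LONG and OBJ_INDEX_BITS == PAGE_SHIFT):
 *
 *	ZS_MIN_ALLOC_SIZE   = MAX(32, (4 << 12) >> 12) = 32
 *	ZS_SIZE_CLASS_DELTA = 4096 >> 8 = 16
 *	number of classes   = (4096 - 32) / 16 + 1 = 255
 *
 * which is where the "255 size classes" figure above comes from.
 */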
/*
 * We do not maintain any list for completely empty or full pages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_FULL
};

enum zs_stat_type {
	OBJ_ALLOCATED,
	OBJ_USED,
	NR_ZS_STAT_TYPE,
};

#ifdef CONFIG_ZSMALLOC_STAT

static struct dentry *zs_stat_root;

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#endif
/*
 * number of size_classes
 */
static int zs_size_classes;

/*
 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;
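
/*
 * Example with illustrative numbers: for a zspage that can hold N = 8
 * objects and f = 4, a zspage holding 1 or 2 live objects is grouped as
 * ZS_ALMOST_EMPTY, one holding 3..7 as ZS_ALMOST_FULL, 0 as ZS_EMPTY and
 * 8 as ZS_FULL (see get_fullness_group()).
 */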
struct size_class {
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

#ifdef CONFIG_ZSMALLOC_STAT
	struct zs_size_stat stats;
#endif

	spinlock_t lock;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};
/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	/* Handle of next free chunk (encodes <PFN, obj_idx>) */
	void *next;
};

struct zs_pool {
	char *name;

	struct size_class **size_class;

	gfp_t flags;	/* allocation flags used when growing pool */
	atomic_long_t pages_allocated;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
};
/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
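
/*
 * For example (values purely illustrative), a zspage in size class 42 whose
 * fullness group is ZS_ALMOST_EMPTY stores
 *	((42 & CLASS_IDX_MASK) << FULLNESS_BITS) | (ZS_ALMOST_EMPTY & FULLNESS_MASK)
 * in its first page's ->mapping; see set_zspage_mapping() and
 * get_zspage_mapping() below.
 */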
struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping objects that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};
/* zpool driver */

#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops)
{
	return zs_create_pool(name, gfp);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size);
	return *handle ? 0 : -1;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	return -EINVAL;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}
static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}
/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return idx;
}
/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given page.
 */
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}
/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
}
/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}
/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	class = pool->size_class[class_idx];
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}
/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
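
/*
 * Illustrative trace of the loop above (4K pages, hypothetical
 * class_size = 2720):
 *	i = 1: waste = 4096  % 2720 = 1376, usedpc = 66
 *	i = 2: waste = 8192  % 2720 =   32, usedpc = 99
 *	i = 3: waste = 12288 % 2720 = 1408, usedpc = 88
 *	i = 4: waste = 16384 % 2720 =   64, usedpc = 99
 * Two pages per zspage are chosen; the later, equal usedpc does not win
 * because only a strictly greater value updates max_usedpc_order.
 */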
/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page_private(page);
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}
/*
 * Encode <page, obj_idx> as a single handle value.
 * On hardware platforms with physical memory starting at 0x0 the pfn
 * could be 0 so we ensure that the handle will never be 0 by adjusting the
 * encoded obj_idx value before encoding.
 */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);

	return (void *)handle;
}
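
/*
 * Example with illustrative numbers: with OBJ_INDEX_BITS == 12, the object
 * at index 5 in the page with pfn 0x1234 is encoded as
 *	handle = (0x1234 << 12) | (5 + 1) = 0x1234006
 * and obj_handle_to_location() below recovers pfn 0x1234 and index 5 by
 * undoing the +1 adjustment.
 */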
/*
 * Decode <page, obj_idx> pair from the given object handle. We adjust the
 * decoded obj_idx back to its original value since it was adjusted in
 * obj_location_to_handle().
 */
static void obj_handle_to_location(unsigned long handle, struct page **page,
				unsigned long *obj_idx)
{
	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
	*obj_idx = (handle & OBJ_INDEX_MASK) - 1;
}

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}
static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	page_mapcount_reset(page);
}
static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}
/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i = 1;
		void *vaddr;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = obj_location_to_handle(page, i++);
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}
}
/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			set_page_private(first_page, (unsigned long)page);
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}
static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}
#ifdef CONFIG_PGTABLE_MAPPING

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */
static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static int zs_register_cpu_notifier(void)
{
	int cpu, uninitialized_var(ret);

	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			break;
	}

	cpu_notifier_register_done();
	return notifier_to_errno(ret);
}

static void zs_unregister_cpu_notifier(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}
static void init_zs_size_classes(void)
{
	int nr;

	nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
	if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
		nr += 1;

	zs_size_classes = nr;
}

static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
{
	if (prev->pages_per_zspage != pages_per_zspage)
		return false;

	if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
		!= get_maxobj_per_zspage(size, pages_per_zspage))
		return false;

	return true;
}
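
/*
 * Merging example (illustrative, 4K pages): classes of size 4080 and 4096
 * both need one page per zspage and both fit exactly one object per zspage,
 * so can_merge() lets them share a single size_class.
 */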
#ifdef CONFIG_ZSMALLOC_STAT

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return class->stats.objs[type];
}

static int __init zs_stat_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		return -ENOMEM;

	return 0;
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long obj_allocated, obj_used, pages_used;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;

	seq_printf(s, " %5s %5s %13s %10s %10s\n", "class", "size",
			"obj_allocated", "obj_used", "pages_used");

	for (i = 0; i < zs_size_classes; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		spin_unlock(&class->lock);

		objs_per_zspage = get_maxobj_per_zspage(class->size,
				class->pages_per_zspage);
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %10lu %10lu %10lu\n", i,
			class->size, obj_allocated, obj_used, pages_used);

		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
	}

	seq_printf(s, " %5s %5s %10lu %10lu %10lu\n", "Total", "",
			total_objs, total_used_objs, total_pages);

	return 0;
}

static int zs_stats_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, zs_stats_size_show, inode->i_private);
}

static const struct file_operations zs_stat_size_ops = {
	.open		= zs_stats_size_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	struct dentry *entry;

	if (!zs_stat_root)
		return -ENODEV;

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
		return -ENOMEM;
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("obj_in_classes", S_IFREG | S_IRUGO,
			pool->stat_dentry, pool, &zs_stat_size_ops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "obj_in_classes");
		return -ENOMEM;
	}

	return 0;
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}
#else /* CONFIG_ZSMALLOC_STAT */

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return 0;
}

static int __init zs_stat_init(void)
{
	return 0;
}

static void __exit zs_stat_exit(void)
{
}

static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	return 0;
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}

#endif /* CONFIG_ZSMALLOC_STAT */
unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);
/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		return area->vm_addr + off;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	return __zs_map_object(area, pages, off, class->size);
}
EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long obj;
	struct link_free *link;
	struct size_class *class;
	void *vaddr;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return 0;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);

		spin_lock(&class->lock);
		zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
	}

	obj = (unsigned long)first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(vaddr);

	first_page->inuse++;
	zs_stat_inc(class, OBJ_USED, 1);
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);
void zs_free(struct zs_pool *pool, unsigned long obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;
	void *vaddr;

	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* Insert this object in containing zspage's freelist */
	vaddr = kmap_atomic(f_page);
	link = (struct link_free *)(vaddr + f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(vaddr);
	first_page->freelist = (void *)obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);

	zs_stat_dec(class, OBJ_USED, 1);
	if (fullness == ZS_EMPTY)
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));

	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY) {
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(first_page);
	}
}
EXPORT_SYMBOL_GPL(zs_free);
/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @flags: allocation flags used to allocate pool metadata
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(char *name, gfp_t flags)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name) {
		kfree(pool);
		return NULL;
	}

	pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
			GFP_KERNEL);
	if (!pool->size_class) {
		kfree(pool->name);
		kfree(pool);
		return NULL;
	}

	/*
	 * Iterate reversely, because, size of size_class that we want to use
	 * for merging should be larger or equal to current size.
	 */
	for (i = zs_size_classes - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_class have same
		 * characteristics. So, we make size_class point to
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, size, pages_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;

		prev_class = class;
	}

	pool->flags = flags;

	if (zs_pool_stat_create(name, pool))
		goto err;

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_pool_stat_destroy(pool);

	for (i = 0; i < zs_size_classes; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}

	kfree(pool->size_class);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
static int __init zs_init(void)
{
	int ret = zs_register_cpu_notifier();

	if (ret) {
		zs_unregister_cpu_notifier();
		return ret;
	}

	init_zs_size_classes();

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	ret = zs_stat_init();
	if (ret) {
		pr_err("zs stat initialization failed\n");
		goto stat_fail;
	}
	return 0;

stat_fail:
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zs_unregister_cpu_notifier();

	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zs_unregister_cpu_notifier();

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");