// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->private: points to zspage
 *	page->index: links together all component pages of a zspage
 *		For the huge page, this is always 0, so we use this field
 *		to store handle.
 *	page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
 *		offset in a subpage of a zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_owner_priv_1: identifies the huge component page
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/sprintf.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
#include <linux/migrate.h>
#include <linux/wait.h>
#include <linux/pagemap.h>
#include <linux/local_lock.h>
#define ZSPAGE_MAGIC	0x58

/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page, which avoids the complex case of mapping 2 pages
 * simply to restore link_free pointer values.
 */
#define ZS_ALIGN	8

#define ZS_HANDLE_SIZE (sizeof(unsigned long))
/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> starts from 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_POSSIBLE_PHYSMEM_BITS
#ifdef MAX_PHYSMEM_BITS
#define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
#else
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT.
 */
#define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif

#define _PFN_BITS		(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
/*
 * The head entry of an allocated object carries OBJ_ALLOCATED_TAG so we can
 * tell whether the object is allocated or not.
 * It's okay to use the least significant bit for the tag because the header
 * keeps the handle, which is a 4-byte-aligned address, so we have room for
 * at least two bits.
 */
#define OBJ_ALLOCATED_TAG	1

#define OBJ_TAG_BITS	1
#define OBJ_TAG_MASK	OBJ_ALLOCATED_TAG

#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
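
/*
 * Illustrative sketch of the encoding described above (not part of the
 * allocator; "example_encode" is a hypothetical helper). The PFN occupies
 * the high bits, the object index the low bits, and the whole value is
 * shifted up by OBJ_TAG_BITS so the least significant bit stays free for
 * OBJ_ALLOCATED_TAG:
 *
 *	static unsigned long example_encode(unsigned long pfn,
 *					    unsigned int obj_idx)
 *	{
 *		unsigned long obj = (pfn << OBJ_INDEX_BITS) |
 *				    (obj_idx & OBJ_INDEX_MASK);
 *
 *		return obj << OBJ_TAG_BITS;
 *	}
 *
 * location_to_obj() and obj_to_location() below perform the real
 * conversions in exactly this manner.
 */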
#define HUGE_BITS	1
#define FULLNESS_BITS	4
#define CLASS_BITS	8
#define MAGIC_VAL_BITS	8

#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL))

/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
				      ZS_SIZE_CLASS_DELTA) + 1)
/*
 * Pages are distinguished by the ratio of used memory (that is the ratio
 * of ->inuse objects to all objects that page can store). For example,
 * INUSE_RATIO_10 means that the ratio of used objects is > 0% and <= 10%.
 *
 * The number of fullness groups is not random. It allows us to keep
 * difference between the least busy page in the group (minimum permitted
 * number of ->inuse objects) and the most busy page (maximum permitted
 * number of ->inuse objects) at a reasonable value.
 */
enum fullness_group {
	ZS_INUSE_RATIO_0,
	ZS_INUSE_RATIO_10,
	/* NOTE: 8 more fullness groups here */
	ZS_INUSE_RATIO_99       = 10,
	ZS_INUSE_RATIO_100,
	NR_FULLNESS_GROUPS,
};
enum class_stat_type {
	/* NOTE: stats for 12 fullness groups here: from inuse 0 to 100 */
	ZS_OBJS_ALLOCATED       = NR_FULLNESS_GROUPS,
	ZS_OBJS_INUSE,
	NR_CLASS_STAT_TYPES,
};
struct zs_size_stat {
	unsigned long objs[NR_CLASS_STAT_TYPES];
};
#ifdef CONFIG_ZSMALLOC_STAT
static struct dentry *zs_stat_root;
#endif

static size_t huge_class_size;
struct size_class {
	spinlock_t lock;
	struct list_head fullness_list[NR_FULLNESS_GROUPS];
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	unsigned int index;
	struct zs_size_stat stats;
};
/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives head of this list.
 *
 * This must be a power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Free object index;
		 * It's valid for non-allocated object
		 */
		unsigned long next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};
struct zs_pool {
	const char *name;

	struct size_class *size_class[ZS_SIZE_CLASSES];
	struct kmem_cache *handle_cachep;
	struct kmem_cache *zspage_cachep;

	atomic_long_t pages_allocated;

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker *shrinker;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
#ifdef CONFIG_COMPACTION
	struct work_struct free_work;
#endif
	/* protect page/zspage migration */
	rwlock_t migrate_lock;
	atomic_t compaction_in_progress;
};
struct zspage {
	struct {
		unsigned int huge:HUGE_BITS;
		unsigned int fullness:FULLNESS_BITS;
		unsigned int class:CLASS_BITS + 1;
		unsigned int magic:MAGIC_VAL_BITS;
	};
	unsigned int inuse;
	unsigned int freeobj;
	struct page *first_page;
	struct list_head list; /* fullness list */
	struct zs_pool *pool;
	rwlock_t lock;
};
struct mapping_area {
	local_lock_t lock;
	char *vm_buf; /* copy buffer for objects that span pages */
	char *vm_addr; /* address of kmap_local_page()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};
/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetZsHugePage(struct zspage *zspage)
{
	zspage->huge = 1;
}

static bool ZsHugePage(struct zspage *zspage)
{
	return zspage->huge;
}
static void migrate_lock_init(struct zspage *zspage);
static void migrate_read_lock(struct zspage *zspage);
static void migrate_read_unlock(struct zspage *zspage);
static void migrate_write_lock(struct zspage *zspage);
static void migrate_write_unlock(struct zspage *zspage);
#ifdef CONFIG_COMPACTION
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
static void kick_deferred_free(struct zs_pool *pool) {}
static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
#endif
static int create_cache(struct zs_pool *pool)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name);
	if (!name)
		return -ENOMEM;
	pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE,
						0, 0, NULL);
	kfree(name);
	if (!pool->handle_cachep)
		return -EINVAL;

	name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name);
	if (!name)
		return -ENOMEM;
	pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage),
						0, 0, NULL);
	kfree(name);
	if (!pool->zspage_cachep) {
		kmem_cache_destroy(pool->handle_cachep);
		pool->handle_cachep = NULL;
		return -EINVAL;
	}

	return 0;
}
static void destroy_cache(struct zs_pool *pool)
{
	kmem_cache_destroy(pool->handle_cachep);
	kmem_cache_destroy(pool->zspage_cachep);
}
static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
}

static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}
static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
{
	return kmem_cache_zalloc(pool->zspage_cachep,
			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
}

static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
{
	kmem_cache_free(pool->zspage_cachep, zspage);
}
/* class->lock (which owns the handle) synchronizes races */
static void record_obj(unsigned long handle, unsigned long obj)
{
	*(unsigned long *)handle = obj;
}
#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(const char *name, gfp_t gfp)
{
	/*
	 * Ignore global gfp flags: zs_malloc() may be invoked from
	 * different contexts and its caller must provide a valid
	 * gfp mask.
	 */
	return zs_create_pool(name);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}
static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size, gfp);

	if (IS_ERR_VALUE(*handle))
		return PTR_ERR((void *)*handle);
	return 0;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}
static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW:
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_pages(void *pool)
{
	return zs_get_total_pages(pool);
}
static struct zpool_driver zs_zpool_driver = {
	.type =			  "zsmalloc",
	.owner =		  THIS_MODULE,
	.create =		  zs_zpool_create,
	.destroy =		  zs_zpool_destroy,
	.malloc_support_movable = true,
	.malloc =		  zs_zpool_malloc,
	.free =			  zs_zpool_free,
	.map =			  zs_zpool_map,
	.unmap =		  zs_zpool_unmap,
	.total_pages =		  zs_zpool_total_pages,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
	.lock	= INIT_LOCAL_LOCK(lock),
};
static __maybe_unused int is_first_page(struct page *page)
{
	return PagePrivate(page);
}
/* Protected by class->lock */
static inline int get_zspage_inuse(struct zspage *zspage)
{
	return zspage->inuse;
}

static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse += val;
}

static inline struct page *get_first_page(struct zspage *zspage)
{
	struct page *first_page = zspage->first_page;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
	return first_page;
}
#define FIRST_OBJ_PAGE_TYPE_MASK	0xffffff

static inline unsigned int get_first_obj_offset(struct page *page)
{
	VM_WARN_ON_ONCE(!PageZsmalloc(page));
	return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK;
}

static inline void set_first_obj_offset(struct page *page, unsigned int offset)
{
	/* With 24 bits available, we can support offsets into 16 MiB pages. */
	BUILD_BUG_ON(PAGE_SIZE > SZ_16M);
	VM_WARN_ON_ONCE(!PageZsmalloc(page));
	VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
	page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
	page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
}
static inline unsigned int get_freeobj(struct zspage *zspage)
{
	return zspage->freeobj;
}

static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
{
	zspage->freeobj = obj;
}

static struct size_class *zspage_class(struct zs_pool *pool,
				       struct zspage *zspage)
{
	return pool->size_class[zspage->class];
}
/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns the index of the
 * size class whose chunk size is big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
}
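
/*
 * Worked example (assuming 4K pages, so ZS_MIN_ALLOC_SIZE == 32 and
 * ZS_SIZE_CLASS_DELTA == 4096 >> CLASS_BITS == 16): a request for 100 bytes
 * yields idx = DIV_ROUND_UP(100 - 32, 16) == 5, i.e. the class that stores
 * 32 + 5 * 16 == 112 byte chunks - the smallest chunk size that fits.
 */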
static inline void class_stat_add(struct size_class *class, int type,
				  unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

static inline void class_stat_sub(struct size_class *class, int type,
				  unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

static inline unsigned long class_stat_read(struct size_class *class, int type)
{
	return class->stats.objs[type];
}
#ifdef CONFIG_ZSMALLOC_STAT

static void __init zs_stat_init(void)
{
	if (!debugfs_initialized()) {
		pr_warn("debugfs not available, stat dir not created\n");
		return;
	}

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}
static unsigned long zs_can_compact(struct size_class *class);

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i, fg;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long obj_allocated, obj_used, pages_used, freeable;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
	unsigned long total_freeable = 0;
	unsigned long inuse_totals[NR_FULLNESS_GROUPS] = {0, };

	seq_printf(s, " %5s %5s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %13s %10s %10s %16s %8s\n",
			"class", "size", "10%", "20%", "30%", "40%",
			"50%", "60%", "70%", "80%", "90%", "99%", "100%",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage", "freeable");

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {

		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);

		seq_printf(s, " %5u %5u ", i, class->size);
		for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) {
			inuse_totals[fg] += class_stat_read(class, fg);
			seq_printf(s, "%9lu ", class_stat_read(class, fg));
		}

		obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
		obj_used = class_stat_read(class, ZS_OBJS_INUSE);
		freeable = zs_can_compact(class);
		spin_unlock(&class->lock);

		objs_per_zspage = class->objs_per_zspage;
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, "%13lu %10lu %10lu %16d %8lu\n",
			   obj_allocated, obj_used, pages_used,
			   class->pages_per_zspage, freeable);

		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
		total_freeable += freeable;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s ", "Total", "");

	for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++)
		seq_printf(s, "%9lu ", inuse_totals[fg]);

	seq_printf(s, "%13lu %10lu %10lu %16s %8lu\n",
		   total_objs, total_used_objs, total_pages, "",
		   total_freeable);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(zs_stats_size);
static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
	if (!zs_stat_root) {
		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
		return;
	}

	pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);

	debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
			    &zs_stats_size_fops);
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}
#else /* CONFIG_ZSMALLOC_STAT */
static void __init zs_stat_init(void)
{
}

static void __exit zs_stat_exit(void)
{
}

static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}
#endif
/*
 * For each size class, zspages are divided into different groups
 * depending on their usage ratio. This function returns fullness
 * status of the given page.
 */
static int get_fullness_group(struct size_class *class, struct zspage *zspage)
{
	int inuse, objs_per_zspage, ratio;

	inuse = get_zspage_inuse(zspage);
	objs_per_zspage = class->objs_per_zspage;

	if (inuse == 0)
		return ZS_INUSE_RATIO_0;
	if (inuse == objs_per_zspage)
		return ZS_INUSE_RATIO_100;

	ratio = 100 * inuse / objs_per_zspage;
	/*
	 * Take integer division into consideration: a page with one inuse
	 * object out of 127 possible, will end up having 0 usage ratio,
	 * which is wrong as it belongs in ZS_INUSE_RATIO_10 fullness group.
	 */
	return ratio / 10 + 1;
}
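
/*
 * Worked example: a zspage with 3 of 32 possible objects in use gives
 * ratio = 100 * 3 / 32 == 9, so the function returns 9 / 10 + 1 == 1,
 * which is ZS_INUSE_RATIO_10 - a non-empty page never lands in
 * ZS_INUSE_RATIO_0 despite the integer division.
 */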
/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct size_class *class,
				struct zspage *zspage,
				int fullness)
{
	class_stat_add(class, fullness, 1);
	list_add(&zspage->list, &class->fullness_list[fullness]);
	zspage->fullness = fullness;
}
/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct size_class *class, struct zspage *zspage)
{
	int fullness = zspage->fullness;

	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));

	list_del_init(&zspage->list);
	class_stat_sub(class, fullness, 1);
}
/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, for instance, from
 * INUSE_RATIO_80 to INUSE_RATIO_70 when freeing an object. This function
 * checks if such a status change has occurred for the given page and
 * accordingly moves the page from the list of the old fullness group to that
 * of the new fullness group.
 */
static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
{
	int newfg;

	newfg = get_fullness_group(class, zspage);
	if (newfg == zspage->fullness)
		goto out;

	remove_zspage(class, zspage);
	insert_zspage(class, zspage, newfg);
out:
	return newfg;
}
static struct zspage *get_zspage(struct page *page)
{
	struct zspage *zspage = (struct zspage *)page_private(page);

	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
	return zspage;
}

static struct page *get_next_page(struct page *page)
{
	struct zspage *zspage = get_zspage(page);

	if (unlikely(ZsHugePage(zspage)))
		return NULL;

	return (struct page *)page->index;
}
/**
 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
 * @obj: the encoded object value
 * @page: page object resides in zspage
 * @obj_idx: object index
 */
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned int *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

static void obj_to_page(unsigned long obj, struct page **page)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
}

/**
 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
 * @page: page object resides in zspage
 * @obj_idx: object index
 */
static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
{
	unsigned long obj;

	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
	obj |= obj_idx & OBJ_INDEX_MASK;
	obj <<= OBJ_TAG_BITS;

	return obj;
}
static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}
static inline bool obj_allocated(struct page *page, void *obj,
				 unsigned long *phandle)
{
	unsigned long handle;
	struct zspage *zspage = get_zspage(page);

	if (unlikely(ZsHugePage(zspage))) {
		VM_BUG_ON_PAGE(!is_first_page(page), page);
		handle = page->index;
	} else
		handle = *(unsigned long *)obj;

	if (!(handle & OBJ_ALLOCATED_TAG))
		return false;

	/* Clear all tags before returning the handle */
	*phandle = handle & ~OBJ_TAG_MASK;
	return true;
}
static void reset_page(struct page *page)
{
	__ClearPageMovable(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->index = 0;
	__ClearPageZsmalloc(page);
}
static int trylock_zspage(struct zspage *zspage)
{
	struct page *cursor, *fail;

	for (cursor = get_first_page(zspage); cursor != NULL; cursor =
					get_next_page(cursor)) {
		if (!trylock_page(cursor)) {
			fail = cursor;
			goto unlock;
		}
	}

	return 1;
unlock:
	for (cursor = get_first_page(zspage); cursor != fail; cursor =
					get_next_page(cursor))
		unlock_page(cursor);

	return 0;
}
static void __free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	struct page *page, *next;

	assert_spin_locked(&class->lock);

	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);

	next = page = get_first_page(zspage);
	do {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		next = get_next_page(page);
		reset_page(page);
		unlock_page(page);
		dec_zone_page_state(page, NR_ZSPAGES);
		put_page(page);
		page = next;
	} while (page != NULL);

	cache_free_zspage(pool, zspage);

	class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
	atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
}
static void free_zspage(struct zs_pool *pool, struct size_class *class,
			struct zspage *zspage)
{
	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(list_empty(&zspage->list));

	/*
	 * Since zs_free cannot sleep, this function cannot call
	 * lock_page. The page locks trylock_zspage got will be released
	 * by __free_zspage.
	 */
	if (!trylock_zspage(zspage)) {
		kick_deferred_free(pool);
		return;
	}

	remove_zspage(class, zspage);
	__free_zspage(pool, class, zspage);
}
/* Initialize a newly allocated zspage */
static void init_zspage(struct size_class *class, struct zspage *zspage)
{
	unsigned int freeobj = 1;
	unsigned long off = 0;
	struct page *page = get_first_page(zspage);

	while (page) {
		struct page *next_page;
		struct link_free *link;
		void *vaddr;

		set_first_obj_offset(page, off);

		vaddr = kmap_local_page(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = freeobj++ << OBJ_TAG_BITS;
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		if (next_page) {
			link->next = freeobj++ << OBJ_TAG_BITS;
		} else {
			/*
			 * Reset OBJ_TAG_BITS bit in the last link to tell
			 * whether it's an allocated object or not.
			 */
			link->next = -1UL << OBJ_TAG_BITS;
		}
		kunmap_local(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}

	set_freeobj(zspage, 0);
}
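
/*
 * Freelist layout after init_zspage(), illustrated for a hypothetical
 * class->size of 64 on a 4K page: objects sit at offsets 0, 64, 128, ...
 * and the link embedded in object i holds (i + 1) << OBJ_TAG_BITS, so
 * shifting each link right by OBJ_TAG_BITS walks the list 0 -> 1 -> 2 ...
 * The last object's link is -1UL << OBJ_TAG_BITS, whose cleared tag bit
 * marks the end of the free list.
 */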
static void create_page_chain(struct size_class *class, struct zspage *zspage,
				struct page *pages[])
{
	int i;
	struct page *page;
	struct page *prev_page = NULL;
	int nr_pages = class->pages_per_zspage;

	/*
	 * Allocate individual pages and link them together as:
	 * 1. all pages are linked together using page->index
	 * 2. each sub-page points to zspage using page->private
	 *
	 * we set PG_private to identify the first page (i.e. no other sub-page
	 * has this flag set).
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pages[i];
		set_page_private(page, (unsigned long)zspage);
		page->index = 0;
		if (i == 0) {
			zspage->first_page = page;
			SetPagePrivate(page);
			if (unlikely(class->objs_per_zspage == 1 &&
					class->pages_per_zspage == 1))
				SetZsHugePage(zspage);
		} else {
			prev_page->index = (unsigned long)page;
		}
		prev_page = page;
	}
}
/*
 * Allocate a zspage for the given size class
 */
static struct zspage *alloc_zspage(struct zs_pool *pool,
					struct size_class *class,
					gfp_t gfp)
{
	int i;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
	struct zspage *zspage = cache_alloc_zspage(pool, gfp);

	if (!zspage)
		return NULL;

	zspage->magic = ZSPAGE_MAGIC;
	migrate_lock_init(zspage);

	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(gfp);
		if (!page) {
			while (--i >= 0) {
				dec_zone_page_state(pages[i], NR_ZSPAGES);
				__ClearPageZsmalloc(pages[i]);
				__free_page(pages[i]);
			}
			cache_free_zspage(pool, zspage);
			return NULL;
		}
		__SetPageZsmalloc(page);

		inc_zone_page_state(page, NR_ZSPAGES);
		pages[i] = page;
	}

	create_page_chain(class, zspage, pages);
	init_zspage(class, zspage);
	zspage->pool = pool;
	zspage->class = class->index;

	return zspage;
}
static struct zspage *find_get_zspage(struct size_class *class)
{
	int i;
	struct zspage *zspage;

	for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) {
		zspage = list_first_entry_or_null(&class->fullness_list[i],
						  struct zspage, list);
		if (zspage)
			break;
	}

	return zspage;
}
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}
static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_local_page() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	memcpy_from_page(buf, pages[0], off, sizes[0]);
	memcpy_from_page(buf + sizes[0], pages[1], 0, sizes[1]);
out:
	return area->vm_buf;
}
static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf;
	buf = buf + ZS_HANDLE_SIZE;
	size -= ZS_HANDLE_SIZE;
	off += ZS_HANDLE_SIZE;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	memcpy_to_page(pages[0], off, buf, sizes[0]);
	memcpy_to_page(pages[1], 0, buf + sizes[0], sizes[1]);

out:
	/* enable page faults to match kunmap_local() return conditions */
	pagefault_enable();
}
static int zs_cpu_prepare(unsigned int cpu)
{
	struct mapping_area *area;

	area = &per_cpu(zs_map_area, cpu);
	return __zs_cpu_up(area);
}

static int zs_cpu_dead(unsigned int cpu)
{
	struct mapping_area *area;

	area = &per_cpu(zs_map_area, cpu);
	__zs_cpu_down(area);
	return 0;
}
static bool can_merge(struct size_class *prev, int pages_per_zspage,
					int objs_per_zspage)
{
	if (prev->pages_per_zspage == pages_per_zspage &&
		prev->objs_per_zspage == objs_per_zspage)
		return true;

	return false;
}

static bool zspage_full(struct size_class *class, struct zspage *zspage)
{
	return get_zspage_inuse(zspage) == class->objs_per_zspage;
}

static bool zspage_empty(struct zspage *zspage)
{
	return get_zspage_inuse(zspage) == 0;
}
/**
 * zs_lookup_class_index() - Returns index of the zsmalloc &size_class
 * that holds objects of the provided size.
 * @pool: zsmalloc pool to use
 * @size: object size
 *
 * Context: Any context.
 *
 * Return: the index of the zsmalloc &size_class that holds objects of the
 * provided size.
 */
unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
{
	struct size_class *class;

	class = pool->size_class[get_size_class_index(size)];

	return class->index;
}
EXPORT_SYMBOL_GPL(zs_lookup_class_index);
unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);
/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	/* It guarantees it can get zspage from handle safely */
	read_lock(&pool->migrate_lock);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);

	/*
	 * migration cannot move any zpages in this zspage. Here, class->lock
	 * is too heavy since callers would take some time until they call
	 * the zs_unmap_object API, so delegate the locking from class to zspage
	 * which is smaller granularity.
	 */
	migrate_read_lock(zspage);
	read_unlock(&pool->migrate_lock);

	class = zspage_class(pool, zspage);
	off = offset_in_page(class->size * obj_idx);

	local_lock(&zs_map_area.lock);
	area = this_cpu_ptr(&zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_local_page(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	if (likely(!ZsHugePage(zspage)))
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	struct size_class *class;
	struct mapping_area *area;

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);
	class = zspage_class(pool, zspage);
	off = offset_in_page(class->size * obj_idx);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_local(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	local_unlock(&zs_map_area.lock);

	migrate_read_unlock(zspage);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
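
/*
 * Typical map/unmap round trip (illustrative sketch; "src" and "len" are
 * placeholders supplied by the caller):
 *
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *
 * Preemption and page faults stay disabled between the two calls, so the
 * caller must not sleep while the object is mapped.
 */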
/**
 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
 *                        zsmalloc &size_class.
 * @pool: zsmalloc pool to use
 *
 * The function returns the size of the first huge class - any object of equal
 * or bigger size will be stored in a zspage consisting of a single physical
 * page.
 *
 * Context: Any context.
 *
 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
 */
size_t zs_huge_class_size(struct zs_pool *pool)
{
	return huge_class_size;
}
EXPORT_SYMBOL_GPL(zs_huge_class_size);
static unsigned long obj_malloc(struct zs_pool *pool,
				struct zspage *zspage, unsigned long handle)
{
	int i, nr_page, offset;
	unsigned long obj;
	struct link_free *link;
	struct size_class *class;

	struct page *m_page;
	unsigned long m_offset;
	void *vaddr;

	class = pool->size_class[zspage->class];
	obj = get_freeobj(zspage);

	offset = obj * class->size;
	nr_page = offset >> PAGE_SHIFT;
	m_offset = offset_in_page(offset);
	m_page = get_first_page(zspage);

	for (i = 0; i < nr_page; i++)
		m_page = get_next_page(m_page);

	vaddr = kmap_local_page(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
	if (likely(!ZsHugePage(zspage)))
		/* record handle in the header of allocated chunk */
		link->handle = handle | OBJ_ALLOCATED_TAG;
	else
		/* record handle to page->index */
		zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;

	kunmap_local(vaddr);
	mod_zspage_inuse(zspage, 1);

	obj = location_to_obj(m_page, obj);
	record_obj(handle, obj);

	return obj;
}
/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @gfp: gfp flags when allocating object
 *
 * On success, handle to the allocated object is returned,
 * otherwise an ERR_PTR().
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long handle;
	struct size_class *class;
	int newfg;
	struct zspage *zspage;

	if (unlikely(!size))
		return (unsigned long)ERR_PTR(-EINVAL);

	if (unlikely(size > ZS_MAX_ALLOC_SIZE))
		return (unsigned long)ERR_PTR(-ENOSPC);

	handle = cache_alloc_handle(pool, gfp);
	if (!handle)
		return (unsigned long)ERR_PTR(-ENOMEM);

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	/* class->lock effectively protects the zpage migration */
	spin_lock(&class->lock);
	zspage = find_get_zspage(class);
	if (likely(zspage)) {
		obj_malloc(pool, zspage, handle);
		/* Now move the zspage to another fullness group, if required */
		fix_fullness_group(class, zspage);
		class_stat_add(class, ZS_OBJS_INUSE, 1);

		goto out;
	}

	spin_unlock(&class->lock);

	zspage = alloc_zspage(pool, class, gfp);
	if (!zspage) {
		cache_free_handle(pool, handle);
		return (unsigned long)ERR_PTR(-ENOMEM);
	}

	spin_lock(&class->lock);
	obj_malloc(pool, zspage, handle);
	newfg = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, newfg);
	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
	class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
	class_stat_add(class, ZS_OBJS_INUSE, 1);

	/* We completely set up zspage so mark them as movable */
	SetZsPageMovable(pool, zspage);
out:
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);
static void obj_free(int class_size, unsigned long obj)
{
	struct link_free *link;
	struct zspage *zspage;
	struct page *f_page;
	unsigned long f_offset;
	unsigned int f_objidx;
	void *vaddr;

	obj_to_location(obj, &f_page, &f_objidx);
	f_offset = offset_in_page(class_size * f_objidx);
	zspage = get_zspage(f_page);

	vaddr = kmap_local_page(f_page);
	link = (struct link_free *)(vaddr + f_offset);

	/* Insert this object in containing zspage's freelist */
	if (likely(!ZsHugePage(zspage)))
		link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
	else
		f_page->index = 0;
	set_freeobj(zspage, f_objidx);

	kunmap_local(vaddr);
	mod_zspage_inuse(zspage, -1);
}
void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *f_page;
	unsigned long obj;
	struct size_class *class;
	int fullness;

	if (IS_ERR_OR_NULL((void *)handle))
		return;

	/*
	 * The pool->migrate_lock protects the race with zpage's migration
	 * so it's safe to get the page from handle.
	 */
	read_lock(&pool->migrate_lock);
	obj = handle_to_obj(handle);
	obj_to_page(obj, &f_page);
	zspage = get_zspage(f_page);
	class = zspage_class(pool, zspage);
	spin_lock(&class->lock);
	read_unlock(&pool->migrate_lock);

	class_stat_sub(class, ZS_OBJS_INUSE, 1);
	obj_free(class->size, obj);

	fullness = fix_fullness_group(class, zspage);
	if (fullness == ZS_INUSE_RATIO_0)
		free_zspage(pool, class, zspage);

	spin_unlock(&class->lock);
	cache_free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);
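
/*
 * Minimal allocation round trip (illustrative sketch; error handling
 * abbreviated and "len" is a placeholder):
 *
 *	unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);
 *
 *	if (IS_ERR_VALUE(handle))
 *		return PTR_ERR((void *)handle);
 *	...
 *	zs_free(pool, handle);
 *
 * The handle is an opaque value, not a pointer; the object itself is only
 * reachable through zs_map_object()/zs_unmap_object().
 */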
static void zs_object_copy(struct size_class *class, unsigned long dst,
				unsigned long src)
{
	struct page *s_page, *d_page;
	unsigned int s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

	s_off = offset_in_page(class->size * s_objidx);
	d_off = offset_in_page(class->size * d_objidx);

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_local_page(s_page);
	d_addr = kmap_local_page(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		s_off += size;
		s_size -= size;
		d_off += size;
		d_size -= size;

		/*
		 * Calling kunmap_local(d_addr) is necessary. kunmap_local()
		 * calls must occur in reverse order of calls to kmap_local_page().
		 * So, to call kunmap_local(s_addr) we should first call
		 * kunmap_local(d_addr). For more details see
		 * Documentation/mm/highmem.rst.
		 */
		if (s_off >= PAGE_SIZE) {
			kunmap_local(d_addr);
			kunmap_local(s_addr);
			s_page = get_next_page(s_page);
			s_addr = kmap_local_page(s_page);
			d_addr = kmap_local_page(d_page);
			s_size = class->size - written;
			s_off = 0;
		}

		if (d_off >= PAGE_SIZE) {
			kunmap_local(d_addr);
			d_page = get_next_page(d_page);
			d_addr = kmap_local_page(d_page);
			d_size = class->size - written;
			d_off = 0;
		}
	}

	kunmap_local(d_addr);
	kunmap_local(s_addr);
}
/*
 * Find alloced object in zspage from index object and
 * return handle.
 */
static unsigned long find_alloced_obj(struct size_class *class,
				      struct page *page, int *obj_idx)
{
	unsigned int offset;
	int index = *obj_idx;
	unsigned long handle = 0;
	void *addr = kmap_local_page(page);

	offset = get_first_obj_offset(page);
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
		if (obj_allocated(page, addr + offset, &handle))
			break;

		offset += class->size;
		index++;
	}

	kunmap_local(addr);

	*obj_idx = index;

	return handle;
}
static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
			   struct zspage *dst_zspage)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	int obj_idx = 0;
	struct page *s_page = get_first_page(src_zspage);
	struct size_class *class = pool->size_class[src_zspage->class];

	while (1) {
		handle = find_alloced_obj(class, s_page, &obj_idx);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			obj_idx = 0;
			continue;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(pool, dst_zspage, handle);
		zs_object_copy(class, free_obj, used_obj);
		obj_idx++;
		obj_free(class->size, used_obj);

		/* Stop if there is no more space */
		if (zspage_full(class, dst_zspage))
			break;

		/* Stop if there are no more objects to migrate */
		if (zspage_empty(src_zspage))
			break;
	}
}
static struct zspage *isolate_src_zspage(struct size_class *class)
{
	struct zspage *zspage;
	int fg;

	for (fg = ZS_INUSE_RATIO_10; fg <= ZS_INUSE_RATIO_99; fg++) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg],
						  struct zspage, list);
		if (zspage) {
			remove_zspage(class, zspage);
			return zspage;
		}
	}

	return zspage;
}

static struct zspage *isolate_dst_zspage(struct size_class *class)
{
	struct zspage *zspage;
	int fg;

	for (fg = ZS_INUSE_RATIO_99; fg >= ZS_INUSE_RATIO_10; fg--) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg],
						  struct zspage, list);
		if (zspage) {
			remove_zspage(class, zspage);
			return zspage;
		}
	}

	return zspage;
}
/*
 * putback_zspage - add @zspage into right class's fullness list
 * @class: destination class
 * @zspage: target page
 *
 * Return @zspage's fullness status
 */
static int putback_zspage(struct size_class *class, struct zspage *zspage)
{
	int fullness;

	fullness = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, fullness);

	return fullness;
}
#ifdef CONFIG_COMPACTION
/*
 * To prevent zspage destroy during migration, zspage freeing should
 * hold locks of all pages in the zspage.
 */
static void lock_zspage(struct zspage *zspage)
{
	struct page *curr_page, *page;

	/*
	 * Pages we haven't locked yet can be migrated off the list while we're
	 * trying to lock them, so we need to be careful and only attempt to
	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
	 * may no longer belong to the zspage. This means that we may wait for
	 * the wrong page to unlock, so we must take a reference to the page
	 * prior to waiting for it to unlock outside migrate_read_lock().
	 */
	while (1) {
		migrate_read_lock(zspage);
		page = get_first_page(zspage);
		if (trylock_page(page))
			break;
		get_page(page);
		migrate_read_unlock(zspage);
		wait_on_page_locked(page);
		put_page(page);
	}

	curr_page = page;
	while ((page = get_next_page(curr_page))) {
		if (trylock_page(page)) {
			curr_page = page;
		} else {
			get_page(page);
			migrate_read_unlock(zspage);
			wait_on_page_locked(page);
			put_page(page);
			migrate_read_lock(zspage);
		}
	}
	migrate_read_unlock(zspage);
}
#endif /* CONFIG_COMPACTION */
static void migrate_lock_init(struct zspage *zspage)
{
	rwlock_init(&zspage->lock);
}

static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
{
	read_lock(&zspage->lock);
}

static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
{
	read_unlock(&zspage->lock);
}

static void migrate_write_lock(struct zspage *zspage)
{
	write_lock(&zspage->lock);
}

static void migrate_write_unlock(struct zspage *zspage)
{
	write_unlock(&zspage->lock);
}
#ifdef CONFIG_COMPACTION

static const struct movable_operations zsmalloc_mops;

static void replace_sub_page(struct size_class *class, struct zspage *zspage,
				struct page *newpage, struct page *oldpage)
{
	struct page *page;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
	int idx = 0;

	page = get_first_page(zspage);
	do {
		if (page == oldpage)
			pages[idx] = newpage;
		else
			pages[idx] = page;
		idx++;
	} while ((page = get_next_page(page)) != NULL);

	create_page_chain(class, zspage, pages);
	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
	if (unlikely(ZsHugePage(zspage)))
		newpage->index = oldpage->index;
	__SetPageMovable(newpage, &zsmalloc_mops);
}
static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
	/*
	 * Page is locked so zspage couldn't be destroyed. For detail, look at
	 * lock_zspage in free_zspage.
	 */
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	return true;
}
static int zs_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	struct zspage *zspage;
	struct page *dummy;
	void *s_addr, *d_addr, *addr;
	unsigned int offset;
	unsigned long handle;
	unsigned long old_obj, new_obj;
	unsigned int obj_idx;

	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	/* We're committed, tell the world that this is a Zsmalloc page. */
	__SetPageZsmalloc(newpage);

	/* The page is locked, so this pointer must remain valid */
	zspage = get_zspage(page);
	pool = zspage->pool;

	/*
	 * The pool migrate_lock protects the race between zpage migration
	 * and zs_free.
	 */
	write_lock(&pool->migrate_lock);
	class = zspage_class(pool, zspage);

	/*
	 * the class lock protects zpage alloc/free in the zspage.
	 */
	spin_lock(&class->lock);
	/* the migrate_write_lock protects zpage access via zs_map_object */
	migrate_write_lock(zspage);

	offset = get_first_obj_offset(page);
	s_addr = kmap_local_page(page);

	/*
	 * Here, any user cannot access all objects in the zspage so let's move.
	 */
	d_addr = kmap_local_page(newpage);
	copy_page(d_addr, s_addr);
	kunmap_local(d_addr);

	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
					addr += class->size) {
		if (obj_allocated(page, addr, &handle)) {

			old_obj = handle_to_obj(handle);
			obj_to_location(old_obj, &dummy, &obj_idx);
			new_obj = (unsigned long)location_to_obj(newpage,
								obj_idx);
			record_obj(handle, new_obj);
		}
	}
	kunmap_local(s_addr);

	replace_sub_page(class, zspage, newpage, page);
	/*
	 * Since we complete the data copy and set up new zspage structure,
	 * it's okay to release the migration lock.
	 */
	write_unlock(&pool->migrate_lock);
	spin_unlock(&class->lock);
	migrate_write_unlock(zspage);

	get_page(newpage);
	if (page_zone(newpage) != page_zone(page)) {
		dec_zone_page_state(page, NR_ZSPAGES);
		inc_zone_page_state(newpage, NR_ZSPAGES);
	}

	reset_page(page);
	put_page(page);

	return MIGRATEPAGE_SUCCESS;
}
static void zs_page_putback(struct page *page)
{
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
}

static const struct movable_operations zsmalloc_mops = {
	.isolate_page = zs_page_isolate,
	.migrate_page = zs_page_migrate,
	.putback_page = zs_page_putback,
};
/*
 * Caller should hold page_lock of all pages in the zspage
 * In here, we cannot use zspage meta data.
 */
static void async_free_zspage(struct work_struct *work)
{
	int i;
	struct size_class *class;
	struct zspage *zspage, *tmp;
	LIST_HEAD(free_pages);
	struct zs_pool *pool = container_of(work, struct zs_pool,
					free_work);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0],
				 &free_pages);
		spin_unlock(&class->lock);
	}

	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
		list_del(&zspage->list);
		lock_zspage(zspage);

		class = zspage_class(pool, zspage);
		spin_lock(&class->lock);
		class_stat_sub(class, ZS_INUSE_RATIO_0, 1);
		__free_zspage(pool, class, zspage);
		spin_unlock(&class->lock);
	}
}
static void kick_deferred_free(struct zs_pool *pool)
{
	schedule_work(&pool->free_work);
}

static void zs_flush_migration(struct zs_pool *pool)
{
	flush_work(&pool->free_work);
}

static void init_deferred_free(struct zs_pool *pool)
{
	INIT_WORK(&pool->free_work, async_free_zspage);
}

static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
{
	struct page *page = get_first_page(zspage);

	do {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, &zsmalloc_mops);
		unlock_page(page);
	} while ((page = get_next_page(page)) != NULL);
}
#else
static inline void zs_flush_migration(struct zs_pool *pool) { }
#endif
/*
 * Based on the number of unused allocated objects, calculate
 * and return the number of pages that we can free.
 */
static unsigned long zs_can_compact(struct size_class *class)
{
	unsigned long obj_wasted;
	unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
	unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE);

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= class->objs_per_zspage;

	return obj_wasted * class->pages_per_zspage;
}
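
/*
 * Worked example (illustrative numbers): a class with objs_per_zspage == 4
 * and pages_per_zspage == 3 that has 40 allocated but only 28 used objects
 * wastes 12 objects, i.e. 12 / 4 == 3 zspages worth of slack, so up to
 * 3 * 3 == 9 pages can be freed by compaction.
 */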
static unsigned long __zs_compact(struct zs_pool *pool,
				  struct size_class *class)
{
	struct zspage *src_zspage = NULL;
	struct zspage *dst_zspage = NULL;
	unsigned long pages_freed = 0;

	/*
	 * protect the race between zpage migration and zs_free
	 * as well as zpage allocation/free
	 */
	write_lock(&pool->migrate_lock);
	spin_lock(&class->lock);
	while (zs_can_compact(class)) {
		int fg;

		if (!dst_zspage) {
			dst_zspage = isolate_dst_zspage(class);
			if (!dst_zspage)
				break;
		}

		src_zspage = isolate_src_zspage(class);
		if (!src_zspage)
			break;

		migrate_write_lock(src_zspage);
		migrate_zspage(pool, src_zspage, dst_zspage);
		migrate_write_unlock(src_zspage);

		fg = putback_zspage(class, src_zspage);
		if (fg == ZS_INUSE_RATIO_0) {
			free_zspage(pool, class, src_zspage);
			pages_freed += class->pages_per_zspage;
		}
		src_zspage = NULL;

		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
		    || rwlock_is_contended(&pool->migrate_lock)) {
			putback_zspage(class, dst_zspage);
			dst_zspage = NULL;

			spin_unlock(&class->lock);
			write_unlock(&pool->migrate_lock);
			cond_resched();
			write_lock(&pool->migrate_lock);
			spin_lock(&class->lock);
		}
	}

	if (src_zspage)
		putback_zspage(class, src_zspage);

	if (dst_zspage)
		putback_zspage(class, dst_zspage);

	spin_unlock(&class->lock);
	write_unlock(&pool->migrate_lock);

	return pages_freed;
}
unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	struct size_class *class;
	unsigned long pages_freed = 0;

	/*
	 * Pool compaction is performed under pool->migrate_lock so it is basically
	 * single-threaded. Having more than one thread in __zs_compact()
	 * will increase pool->migrate_lock contention, which will impact other
	 * zsmalloc operations that need pool->migrate_lock.
	 */
	if (atomic_xchg(&pool->compaction_in_progress, 1))
		return 0;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;
		pages_freed += __zs_compact(pool, class);
	}
	atomic_long_add(pages_freed, &pool->stats.pages_compacted);
	atomic_set(&pool->compaction_in_progress, 0);

	return pages_freed;
}
EXPORT_SYMBOL_GPL(zs_compact);
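
/*
 * Manual compaction is a one-liner for callers (illustrative sketch):
 *
 *	unsigned long freed = zs_compact(pool);
 *
 *	pr_debug("zsmalloc: compaction freed %lu pages\n", freed);
 *
 * The shrinker below triggers the same path under memory pressure.
 */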
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
{
	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
}
EXPORT_SYMBOL_GPL(zs_pool_stats);
static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
	struct zs_pool *pool = shrinker->private_data;

	/*
	 * Compact classes and calculate compaction delta.
	 * Can run concurrently with a manually triggered
	 * (by user) compaction.
	 */
	pages_freed = zs_compact(pool);

	return pages_freed ? pages_freed : SHRINK_STOP;
}
static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = shrinker->private_data;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}
static void zs_unregister_shrinker(struct zs_pool *pool)
{
	shrinker_free(pool->shrinker);
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name);
	if (!pool->shrinker)
		return -ENOMEM;

	pool->shrinker->scan_objects = zs_shrinker_scan;
	pool->shrinker->count_objects = zs_shrinker_count;
	pool->shrinker->batch = 0;
	pool->shrinker->private_data = pool;

	shrinker_register(pool->shrinker);

	return 0;
}
static int calculate_zspage_chain_size(int class_size)
{
	int i, min_waste = INT_MAX;
	int chain_size = 1;

	if (is_power_of_2(class_size))
		return chain_size;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int waste;

		waste = (i * PAGE_SIZE) % class_size;
		if (waste < min_waste) {
			min_waste = waste;
			chain_size = i;
		}
	}

	return chain_size;
}
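
/*
 * Worked example (assuming 4K pages and a chain limit of at least 3): for a
 * 720-byte class, one page wastes 4096 % 720 == 496 bytes, two pages waste
 * 272, three waste only 48 and four would waste 544, so a chain of three
 * pages is chosen.
 */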
/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	init_deferred_free(pool);
	rwlock_init(&pool->migrate_lock);
	atomic_set(&pool->compaction_in_progress, 0);

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_cache(pool))
		goto err;

	/*
	 * Iterate reversely, because, size of size_class that we want to use
	 * for merging should be larger or equal to current size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		int objs_per_zspage;
		struct size_class *class;
		int fullness;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = calculate_zspage_chain_size(size);
		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

		/*
		 * We iterate from biggest down to smallest classes,
		 * so huge_class_size holds the size of the first huge
		 * class. Any object bigger than or equal to that will
		 * end up in the huge class.
		 */
		if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
				!huge_class_size) {
			huge_class_size = size;
			/*
			 * The object uses ZS_HANDLE_SIZE bytes to store the
			 * handle. We need to subtract it, because zs_malloc()
			 * unconditionally adds handle size before it performs
			 * size class search - so object may be smaller than
			 * huge class size, yet it still can end up in the huge
			 * class because it grows by ZS_HANDLE_SIZE extra bytes
			 * right before class lookup.
			 */
			huge_class_size -= (ZS_HANDLE_SIZE - 1);
		}

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_class have same
		 * characteristics. So, we make size_class point to
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = objs_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;

		fullness = ZS_INUSE_RATIO_0;
		while (fullness < NR_FULLNESS_GROUPS) {
			INIT_LIST_HEAD(&class->fullness_list[fullness]);
			fullness++;
		}

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	/*
	 * Not critical since shrinker is only used to trigger internal
	 * defragmentation of the pool which is a pretty optional thing. If
	 * registration fails we still can use the pool normally and user can
	 * trigger compaction manually. Thus, ignore return code.
	 */
	zs_register_shrinker(pool);

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_flush_migration(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = ZS_INUSE_RATIO_0; fg < NR_FULLNESS_GROUPS; fg++) {
			if (list_empty(&class->fullness_list[fg]))
				continue;

			pr_err("Class-%d fullness group %d is not empty\n",
			       class->size, fg);
		}
		kfree(class);
	}

	destroy_cache(pool);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
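
/*
 * Pool lifecycle in a nutshell (illustrative sketch; "my_pool" is a
 * placeholder name):
 *
 *	struct zs_pool *pool = zs_create_pool("my_pool");
 *
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	zs_destroy_pool(pool);
 */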
static int __init zs_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
				zs_cpu_prepare, zs_cpu_dead);
	if (ret)
		goto out;

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	zs_stat_init();

	return 0;

out:
	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("zsmalloc memory allocator");