// SPDX-License-Identifier: GPL-2.0-only
/* Page fragment allocator
 *
 * An arbitrary-length arbitrary-offset area of memory which resides within a
 * 0 or higher order page. Multiple fragments within that page are
 * individually refcounted, in the page's reference counter.
 *
 * The page_frag functions provide a simple allocation framework for page
 * fragments. This is used by the network stack and network device drivers to
 * provide a backing region of memory for use as either an sk_buff->head, or to
 * be used in the "frags" portion of skb_shared_info.
 */
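/*
 * A minimal usage sketch (illustrative only, not part of this file): a driver
 * keeps a struct page_frag_cache, carves fragments out of it with
 * page_frag_alloc() from <linux/page_frag_cache.h>, and drops each fragment
 * with page_frag_free() once its last user is done, e.g.:
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *	if (buf) {
 *		... fill the 256-byte fragment ...
 *		page_frag_free(buf);
 *	}
 */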
#include <linux/build_bug.h>
#include <linux/export.h>
#include <linux/gfp_types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/page_frag_cache.h>
#include "internal.h"
static unsigned long encoded_page_create(struct page *page, unsigned int order,
					 bool pfmemalloc)
{
	BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_FRAG_CACHE_ORDER_MASK);
	BUILD_BUG_ON(PAGE_FRAG_CACHE_PFMEMALLOC_BIT >= PAGE_SIZE);

	return (unsigned long)page_address(page) |
	       (order & PAGE_FRAG_CACHE_ORDER_MASK) |
	       ((unsigned long)pfmemalloc * PAGE_FRAG_CACHE_PFMEMALLOC_BIT);
}
static unsigned long encoded_page_decode_order(unsigned long encoded_page)
{
	return encoded_page & PAGE_FRAG_CACHE_ORDER_MASK;
}
static void *encoded_page_decode_virt(unsigned long encoded_page)
{
	return (void *)(encoded_page & PAGE_MASK);
}
static struct page *encoded_page_decode_page(unsigned long encoded_page)
{
	return virt_to_page((void *)encoded_page);
}
static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
					     gfp_t gfp_mask)
{
	unsigned long order = PAGE_FRAG_CACHE_MAX_ORDER;
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	/* Try a high-order compound page first, without direct reclaim or
	 * pfmemalloc reserves, so that failure falls back to an order-0 page.
	 */
	gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
		   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
	page = __alloc_pages(gfp_mask, PAGE_FRAG_CACHE_MAX_ORDER,
			     numa_mem_id(), NULL);
#endif
	if (unlikely(!page)) {
		/* Fall back to a single page with the caller's original mask */
		page = __alloc_pages(gfp, 0, numa_mem_id(), NULL);
		order = 0;
	}

	nc->encoded_page = page ?
		encoded_page_create(page, order, page_is_pfmemalloc(page)) : 0;

	return page;
}
void page_frag_cache_drain(struct page_frag_cache *nc)
{
	if (!nc->encoded_page)
		return;

	__page_frag_cache_drain(encoded_page_decode_page(nc->encoded_page),
				nc->pagecnt_bias);
	nc->encoded_page = 0;
}
EXPORT_SYMBOL(page_frag_cache_drain);
void __page_frag_cache_drain(struct page *page, unsigned int count)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

	if (page_ref_sub_and_test(page, count))
		free_unref_page(page, compound_order(page));
}
EXPORT_SYMBOL(__page_frag_cache_drain);
void *__page_frag_alloc_align(struct page_frag_cache *nc,
			      unsigned int fragsz, gfp_t gfp_mask,
			      unsigned int align_mask)
{
	unsigned long encoded_page = nc->encoded_page;
	unsigned int size, offset;
	struct page *page;

	if (unlikely(!encoded_page)) {
refill:
		page = __page_frag_cache_refill(nc, gfp_mask);
		if (!page)
			return NULL;

		encoded_page = nc->encoded_page;

		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		nc->offset = 0;
	}

	size = PAGE_SIZE << encoded_page_decode_order(encoded_page);
	offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);
	if (unlikely(offset + fragsz > size)) {
		if (unlikely(fragsz > PAGE_SIZE)) {
			/*
			 * The caller is trying to allocate a fragment
			 * with fragsz > PAGE_SIZE but the cache isn't big
			 * enough to satisfy the request, this may
			 * happen in low memory conditions.
			 * We don't release the cache page because
			 * it could make memory pressure worse
			 * so we simply return NULL here.
			 */
			return NULL;
		}

		page = encoded_page_decode_page(encoded_page);

		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
			goto refill;

		if (unlikely(encoded_page_decode_pfmemalloc(encoded_page))) {
			free_unref_page(page,
					encoded_page_decode_order(encoded_page));
			goto refill;
		}

		/* OK, page count is 0, we can safely set it */
		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		offset = 0;
	}

	nc->pagecnt_bias--;
	nc->offset = offset + fragsz;

	return encoded_page_decode_virt(encoded_page) + offset;
}
EXPORT_SYMBOL(__page_frag_alloc_align);
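
/*
 * Illustrative note (not upstream documentation): callers are normally
 * expected to reach __page_frag_alloc_align() through the inline wrappers in
 * <linux/page_frag_cache.h>, typically page_frag_alloc(nc, fragsz, gfp) for
 * unaligned fragments or page_frag_alloc_align(nc, fragsz, gfp, align) with a
 * power-of-two alignment; both funnel into this function with the alignment
 * expressed as the mask consumed by __ALIGN_KERNEL_MASK() above.
 */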
/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
void page_frag_free(void *addr)
{
	struct page *page = virt_to_head_page(addr);

	if (unlikely(put_page_testzero(page)))
		free_unref_page(page, compound_order(page));
}
EXPORT_SYMBOL(page_frag_free);