/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/error-injection.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include <trace/events/page_pool.h>
#include "mp_dmabuf_devmem.h"
#include "netmem_priv.h"
#include "page_pool_priv.h"

DEFINE_STATIC_KEY_FALSE(page_pool_mem_providers);

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

#define BIAS_MAX	(LONG_MAX >> 1)
#ifdef CONFIG_PAGE_POOL_STATS
static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);

/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat)						\
	do {									\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_inc(s->__stat);					\
	} while (0)

#define recycle_stat_add(pool, __stat, val)					\
	do {									\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_add(s->__stat, val);					\
	} while (0)
static const char pp_stats[][ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
	"rx_pp_alloc_empty",
	"rx_pp_alloc_refill",
	"rx_pp_alloc_waive",
	"rx_pp_recycle_cached",
	"rx_pp_recycle_cache_full",
	"rx_pp_recycle_ring",
	"rx_pp_recycle_ring_full",
	"rx_pp_recycle_released_ref",
};
/**
 * page_pool_get_stats() - fetch page pool stats
 * @pool: pool from which page was allocated
 * @stats: struct page_pool_stats to fill in
 *
 * Retrieve statistics about the page_pool. This API is only available
 * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
 * A pointer to a caller allocated struct page_pool_stats structure
 * is passed to this API which is filled in. The caller can then report
 * those stats to the user (perhaps via ethtool, debugfs, etc.).
 */
bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats)
{
	int cpu = 0;

	if (!stats)
		return false;
	/* The caller is responsible to initialize stats. */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->alloc_stats.empty;
	stats->alloc_stats.refill += pool->alloc_stats.refill;
	stats->alloc_stats.waive += pool->alloc_stats.waive;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.cache_full += pcpu->cache_full;
		stats->recycle_stats.ring += pcpu->ring;
		stats->recycle_stats.ring_full += pcpu->ring_full;
		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
	}

	return true;
}
EXPORT_SYMBOL(page_pool_get_stats);
u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);
int page_pool_ethtool_stats_get_count(void)
{
	return ARRAY_SIZE(pp_stats);
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
	const struct page_pool_stats *pool_stats = stats;

	*data++ = pool_stats->alloc_stats.fast;
	*data++ = pool_stats->alloc_stats.slow;
	*data++ = pool_stats->alloc_stats.slow_high_order;
	*data++ = pool_stats->alloc_stats.empty;
	*data++ = pool_stats->alloc_stats.refill;
	*data++ = pool_stats->alloc_stats.waive;
	*data++ = pool_stats->recycle_stats.cached;
	*data++ = pool_stats->recycle_stats.cache_full;
	*data++ = pool_stats->recycle_stats.ring;
	*data++ = pool_stats->recycle_stats.ring_full;
	*data++ = pool_stats->recycle_stats.released_refcnt;

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get);
#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
#define recycle_stat_add(pool, __stat, val)
#endif
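
/* Illustrative sketch (not part of the original file): a driver wanting to
 * expose these counters could wire the stats helpers above into its ethtool
 * callbacks roughly as below; "priv" and "priv->rx_pool" are hypothetical
 * driver-private names.
 *
 *	static void drv_get_strings(struct net_device *dev, u32 sset, u8 *data)
 *	{
 *		if (sset == ETH_SS_STATS)
 *			data = page_pool_ethtool_stats_get_strings(data);
 *	}
 *
 *	static int drv_get_sset_count(struct net_device *dev, int sset)
 *	{
 *		return sset == ETH_SS_STATS ?
 *		       page_pool_ethtool_stats_get_count() : -EOPNOTSUPP;
 *	}
 *
 *	static void drv_get_ethtool_stats(struct net_device *dev,
 *					  struct ethtool_stats *stats, u64 *data)
 *	{
 *		struct page_pool_stats pp_stats = {};
 *
 *		if (page_pool_get_stats(priv->rx_pool, &pp_stats))
 *			data = page_pool_ethtool_stats_get(data, &pp_stats);
 *	}
 */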
static bool page_pool_producer_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	bool in_softirq = in_softirq();

	if (in_softirq)
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);

	return in_softirq;
}

static void page_pool_producer_unlock(struct page_pool *pool,
				      bool in_softirq)
	__releases(&pool->ring.producer_lock)
{
	if (in_softirq)
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}
static void page_pool_struct_check(void)
{
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_users);
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_page);
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_offset);
	CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag,
				    PAGE_POOL_FRAG_GROUP_ALIGN);
}
static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params,
			  int cpuid)
{
	unsigned int ring_qsize = 1024; /* Default */
	struct netdev_rx_queue *rxq;
	int err;

	page_pool_struct_check();

	memcpy(&pool->p, &params->fast, sizeof(pool->p));
	memcpy(&pool->slow, &params->slow, sizeof(pool->slow));

	pool->cpuid = cpuid;
	pool->dma_sync_for_cpu = true;

	/* Validate only known flags were used */
	if (pool->slow.flags & ~PP_FLAG_ALL)
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL is for allowing the page to be used for DMA sending,
	 * which is the XDP_TX use-case.
	 */
	if (pool->slow.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;

		pool->dma_map = true;
	}

	if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->slow.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		pool->dma_sync = true;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	pool->has_init_callback = !!pool->slow.init_callback;

#ifdef CONFIG_PAGE_POOL_STATS
	if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) {
		pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
		if (!pool->recycle_stats)
			return -ENOMEM;
	} else {
		/* For system page pool instance we use a singular stats object
		 * instead of allocating a separate percpu variable for each
		 * (also percpu) page pool instance.
		 */
		pool->recycle_stats = &pp_system_recycle_stats;
	}
#endif

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
		if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL))
			free_percpu(pool->recycle_stats);
#endif
		return -ENOMEM;
	}

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* A driver calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->dma_map)
		get_device(pool->p.dev);

	if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
		/* We rely on rtnl_lock()ing to make sure netdev_rx_queue
		 * configuration doesn't change while we're initializing
		 * the page_pool.
		 */
		ASSERT_RTNL();
		rxq = __netif_get_rx_queue(pool->slow.netdev,
					   pool->slow.queue_idx);
		pool->mp_priv = rxq->mp_params.mp_priv;
	}

	if (pool->mp_priv) {
		if (!pool->dma_map || !pool->dma_sync)
			return -EOPNOTSUPP;

		err = mp_dmabuf_devmem_init(pool);
		if (err) {
			pr_warn("%s() mem-provider init failed %d\n", __func__,
				err);
			goto free_ptr_ring;
		}

		static_branch_inc(&page_pool_mem_providers);
	}

	return 0;

free_ptr_ring:
	ptr_ring_cleanup(&pool->ring, NULL);
#ifdef CONFIG_PAGE_POOL_STATS
	if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL))
		free_percpu(pool->recycle_stats);
#endif
	return err;
}
static void page_pool_uninit(struct page_pool *pool)
{
	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->dma_map)
		put_device(pool->p.dev);

#ifdef CONFIG_PAGE_POOL_STATS
	if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL))
		free_percpu(pool->recycle_stats);
#endif
}
/**
 * page_pool_create_percpu() - create a page pool for a given cpu.
 * @params: parameters, see struct page_pool_params
 * @cpuid: cpu identifier
 */
struct page_pool *
page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params, cpuid);
	if (err < 0)
		goto err_free;

	err = page_pool_list(pool);
	if (err)
		goto err_uninit;

	return pool;

err_uninit:
	page_pool_uninit(pool);
err_free:
	pr_warn("%s() gave up with errno %d\n", __func__, err);
	kfree(pool);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(page_pool_create_percpu);
/**
 * page_pool_create() - create a page pool
 * @params: parameters, see struct page_pool_params
 */
struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	return page_pool_create_percpu(params, -1);
}
EXPORT_SYMBOL(page_pool_create);
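
/* Illustrative sketch (not part of the original file): a typical driver-side
 * setup of a DMA-mapping pool for an RX ring; the queue/ring names are
 * hypothetical and error handling is abbreviated.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= ring_size,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.napi		= &rxq->napi,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.offset		= XDP_PACKET_HEADROOM,
 *		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */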
static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);
static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	netmem_ref netmem;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r)) {
		alloc_stat_inc(pool, empty);
		return 0;
	}

	/* Softirq guarantees CPU and thus NUMA node is stable. This
	 * assumes the CPU refilling the driver RX-ring will also run RX-NAPI.
	 */
#if defined(CONFIG_NUMA)
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Refill alloc array, but only if NUMA match */
	do {
		netmem = (__force netmem_ref)__ptr_ring_consume(r);
		if (unlikely(!netmem))
			break;

		if (likely(netmem_is_pref_nid(netmem, pref_nid))) {
			pool->alloc.cache[pool->alloc.count++] = netmem;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fallthrough to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, netmem);
			alloc_stat_inc(pool, waive);
			netmem = 0;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, refill);
	}

	return netmem;
}
static netmem_ref __page_pool_get_cached(struct page_pool *pool)
{
	netmem_ref netmem;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, fast);
	} else {
		netmem = page_pool_refill_alloc_cache(pool);
	}

	return netmem;
}
static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
					    netmem_ref netmem,
					    u32 dma_sync_size)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
				     dma_sync_size, pool->p.dma_dir);
#endif
}
static __always_inline void
page_pool_dma_sync_for_device(const struct page_pool *pool,
			      netmem_ref netmem,
			      u32 dma_sync_size)
{
	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
		__page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
}
static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
{
	dma_addr_t dma;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
	 * This mapping is kept for lifetime of page, until leaving pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
				 (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
				 DMA_ATTR_SKIP_CPU_SYNC |
					 DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	if (page_pool_set_dma_addr_netmem(netmem, dma))
		goto unmap_failed;

	page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);

	return true;

unmap_failed:
	WARN_ONCE(1, "unexpected DMA address, please report to netdev@");
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	return false;
}
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page)))) {
		put_page(page);
		return NULL;
	}

	alloc_stat_inc(pool, slow_high_order);
	page_pool_set_pp_info(pool, page_to_netmem(page));

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page_to_netmem(page),
				   pool->pages_state_hold_cnt);
	return page;
}
/* slow path */
static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
							 gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_order = pool->p.order;
	bool dma_map = pool->dma_map;
	netmem_ref netmem;
	int i, nr_pages;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return page_to_netmem(__page_pool_alloc_page_order(pool, gfp));

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
					 (struct page **)pool->alloc.cache);
	if (unlikely(!nr_pages))
		return 0;

	/* Pages have been filled into alloc.cache array, but count is zero and
	 * page elements have not been (possibly) DMA mapped.
	 */
	for (i = 0; i < nr_pages; i++) {
		netmem = pool->alloc.cache[i];
		if (dma_map && unlikely(!page_pool_dma_map(pool, netmem))) {
			put_page(netmem_to_page(netmem));
			continue;
		}

		page_pool_set_pp_info(pool, netmem);
		pool->alloc.cache[pool->alloc.count++] = netmem;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, netmem,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, slow);
	} else {
		netmem = 0;
	}

	/* When a page has just been allocated it should/must have refcnt 1. */
	return netmem;
}
/* For using page_pool to replace alloc_pages() API calls, but provide
 * a synchronization guarantee for the allocation side.
 */
netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	netmem_ref netmem;

	/* Fast-path: Get a page from cache */
	netmem = __page_pool_get_cached(pool);
	if (netmem)
		return netmem;

	/* Slow-path: cache empty, do real allocation */
	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
		netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp);
	else
		netmem = __page_pool_alloc_pages_slow(pool, gfp);
	return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_netmems);
ALLOW_ERROR_INJECTION(page_pool_alloc_netmems, NULL);

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_netmems(pool, gfp));
}
EXPORT_SYMBOL(page_pool_alloc_pages);
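
/* Illustrative sketch (not part of the original file): RX descriptor refill
 * using the allocation fast path together with the stored DMA address;
 * "rxq" and "rx_desc" are hypothetical driver structures.
 *
 *	struct page *page = page_pool_alloc_pages(rxq->page_pool, GFP_ATOMIC);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	rx_desc->addr = page_pool_get_dma_addr(page) + rxq->headroom;
 */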
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))
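
/* Illustrative note (not part of the original file): the subtraction is done
 * in u32 and only then cast to s32, so the distance stays correct across
 * counter wrap-around, e.g. _distance(2, 0xfffffffe) == 4.
 */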
s32 page_pool_inflight(const struct page_pool *pool, bool strict)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	if (strict) {
		trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
		WARN(inflight < 0, "Negative(%d) inflight packet-pages",
		     inflight);
	} else {
		inflight = max(0, inflight);
	}

	return inflight;
}
void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
{
	netmem_set_pp(netmem, pool);
	netmem_or_pp_magic(netmem, PP_SIGNATURE);

	/* Ensuring all pages have been split into one fragment initially:
	 * page_pool_set_pp_info() is only called once for every page when it
	 * is allocated from the page allocator and page_pool_fragment_page()
	 * is dirtying the same cache line as the page->pp_magic above, so
	 * the overhead is negligible.
	 */
	page_pool_fragment_netmem(netmem, 1);
	if (pool->has_init_callback)
		pool->slow.init_callback(netmem, pool->slow.init_arg);
}
void page_pool_clear_pp_info(netmem_ref netmem)
{
	netmem_clear_pp_magic(netmem);
	netmem_set_pp(netmem, NULL);
}
static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
							  netmem_ref netmem)
{
	dma_addr_t dma;

	if (!pool->dma_map)
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		return;

	dma = page_pool_get_dma_addr_netmem(netmem);

	/* When page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	page_pool_set_dma_addr_netmem(netmem, 0);
}
/* Disconnects a page (from a page_pool). API users can have a need
 * to disconnect a page (from a page_pool), to allow it to be used as
 * a regular page (that will eventually be returned to the normal
 * page-allocator via put_page).
 */
void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
{
	int count;
	bool put;

	put = true;
	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
		put = mp_dmabuf_devmem_release_page(pool, netmem);
	else
		__page_pool_release_page_dma(pool, netmem);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, netmem, count);

	if (put) {
		page_pool_clear_pp_info(netmem);
		put_page(netmem_to_page(netmem));
	}
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}
static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
{
	int ret;

	/* BH protection not needed if current is softirq */
	if (in_softirq())
		ret = ptr_ring_produce(&pool->ring, (__force void *)netmem);
	else
		ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem);

	if (!ret) {
		recycle_stat_inc(pool, ring);
		return true;
	}

	return false;
}
/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(netmem_ref netmem,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
		recycle_stat_inc(pool, cache_full);
		return false;
	}

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = netmem;
	recycle_stat_inc(pool, cached);
	return true;
}
static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
{
	return netmem_is_net_iov(netmem) ||
	       (page_ref_count(netmem_to_page(netmem)) == 1 &&
		!page_is_pfmemalloc(netmem_to_page(netmem)));
}
/* If the page refcnt == 1, this will try to recycle the page.
 * If pool->dma_sync is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline netmem_ref
__page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
		     unsigned int dma_sync_size, bool allow_direct)
{
	lockdep_assert_no_hardirq();

	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 *
	 * page is NOT reusable when allocated when system is under
	 * some pressure. (page_is_pfmemalloc)
	 */
	if (likely(__page_pool_page_can_be_recycled(netmem))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);

		if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
			return 0;

		/* Page found as candidate for recycling */
		return netmem;
	}

	/* Fallback/non-XDP mode: API user has elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, API user must
	 * call page_pool_put_page once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	recycle_stat_inc(pool, released_refcnt);
	page_pool_return_page(pool, netmem);

	return 0;
}
static bool page_pool_napi_local(const struct page_pool *pool)
{
	const struct napi_struct *napi;
	u32 cpuid;

	if (unlikely(!in_softirq()))
		return false;

	/* Allow direct recycle if we have reasons to believe that we are
	 * in the same context as the consumer would run, so there's
	 * no possible race.
	 * __page_pool_put_page() makes sure we're not in hardirq context
	 * and interrupts are enabled prior to accessing the cache.
	 */
	cpuid = smp_processor_id();
	if (READ_ONCE(pool->cpuid) == cpuid)
		return true;

	napi = READ_ONCE(pool->p.napi);

	return napi && READ_ONCE(napi->list_owner) == cpuid;
}
void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
				  unsigned int dma_sync_size, bool allow_direct)
{
	if (!allow_direct)
		allow_direct = page_pool_napi_local(pool);

	netmem =
		__page_pool_put_page(pool, netmem, dma_sync_size, allow_direct);
	if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
		/* Cache full, fallback to free pages */
		recycle_stat_inc(pool, ring_full);
		page_pool_return_page(pool, netmem);
	}
}
EXPORT_SYMBOL(page_pool_put_unrefed_netmem);
void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size, bool allow_direct)
{
	page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size,
				     allow_direct);
}
EXPORT_SYMBOL(page_pool_put_unrefed_page);
static void page_pool_recycle_ring_bulk(struct page_pool *pool,
					netmem_ref *bulk,
					u32 bulk_len)
{
	bool in_softirq;
	u32 i;

	/* Bulk produce into ptr_ring page_pool cache */
	in_softirq = page_pool_producer_lock(pool);

	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) {
			/* ring full */
			recycle_stat_inc(pool, ring_full);
			break;
		}
	}

	page_pool_producer_unlock(pool, in_softirq);
	recycle_stat_add(pool, ring, i);

	/* Hopefully all pages were returned into ptr_ring */
	if (likely(i == bulk_len))
		return;

	/*
	 * ptr_ring cache is full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation.
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, bulk[i]);
}
/**
 * page_pool_put_netmem_bulk() - release references on multiple netmems
 * @data:	array holding netmem references
 * @count:	number of entries in @data
 *
 * Tries to refill a number of netmems into the ptr_ring cache holding the
 * ptr_ring producer lock. If the ptr_ring is full, page_pool_put_netmem_bulk()
 * will release leftover netmems to the memory provider.
 * page_pool_put_netmem_bulk() is suitable to be run inside the driver NAPI tx
 * completion loop for the XDP_REDIRECT use case.
 *
 * Please note the caller must not use the data area after running
 * page_pool_put_netmem_bulk(), as this function overwrites it.
 */
void page_pool_put_netmem_bulk(netmem_ref *data, u32 count)
{
	u32 bulk_len = 0;

	for (u32 i = 0; i < count; i++) {
		netmem_ref netmem = netmem_compound_head(data[i]);

		if (page_pool_unref_and_test(netmem))
			data[bulk_len++] = netmem;
	}

	count = bulk_len;

	do {
		netmem_ref bulk[XDP_BULK_QUEUE_SIZE];
		struct page_pool *pool = NULL;
		bool allow_direct;
		u32 foreign = 0;

		bulk_len = 0;

		for (u32 i = 0; i < count; i++) {
			struct page_pool *netmem_pp;
			netmem_ref netmem = data[i];

			netmem_pp = netmem_get_pp(netmem);
			if (unlikely(!pool)) {
				pool = netmem_pp;
				allow_direct = page_pool_napi_local(pool);
			} else if (netmem_pp != pool) {
				/*
				 * If the netmem belongs to a different
				 * page_pool, save it for another round.
				 */
				data[foreign++] = netmem;
				continue;
			}

			netmem = __page_pool_put_page(pool, netmem, -1,
						      allow_direct);
			/* Approved for bulk recycling in ptr_ring cache */
			if (netmem)
				bulk[bulk_len++] = netmem;
		}

		if (bulk_len)
			page_pool_recycle_ring_bulk(pool, bulk, bulk_len);

		count = foreign;
	} while (count);
}
EXPORT_SYMBOL(page_pool_put_netmem_bulk);
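
/* Illustrative sketch (not part of the original file): a driver's XDP TX
 * completion path can batch returns instead of calling page_pool_put_page()
 * per frame; "bq", "txq" and "next_completed_netmem()" are hypothetical
 * driver-side names.
 *
 *	netmem_ref bq[XDP_BULK_QUEUE_SIZE];
 *	u32 n = 0;
 *	netmem_ref nm;
 *
 *	while (n < XDP_BULK_QUEUE_SIZE && (nm = next_completed_netmem(txq)))
 *		bq[n++] = nm;
 *	page_pool_put_netmem_bulk(bq, n);
 */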
static netmem_ref page_pool_drain_frag(struct page_pool *pool,
				       netmem_ref netmem)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_unref_netmem(netmem, drain_count)))
		return 0;

	if (__page_pool_page_can_be_recycled(netmem)) {
		page_pool_dma_sync_for_device(pool, netmem, -1);
		return netmem;
	}

	page_pool_return_page(pool, netmem);
	return 0;
}
static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	netmem_ref netmem = pool->frag_page;

	pool->frag_page = 0;

	if (!netmem || page_pool_unref_netmem(netmem, drain_count))
		return;

	page_pool_return_page(pool, netmem);
}
netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
				       unsigned int *offset, unsigned int size,
				       gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	netmem_ref netmem = pool->frag_page;

	if (WARN_ON(size > max_size))
		return 0;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (netmem && *offset + size > max_size) {
		netmem = page_pool_drain_frag(pool, netmem);
		if (netmem) {
			recycle_stat_inc(pool, cached);
			alloc_stat_inc(pool, fast);
			goto frag_reset;
		}
	}

	if (!netmem) {
		netmem = page_pool_alloc_netmems(pool, gfp);
		if (unlikely(!netmem)) {
			pool->frag_page = 0;
			return 0;
		}

		pool->frag_page = netmem;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_fragment_netmem(netmem, BIAS_MAX);
		return netmem;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_frag_netmem);

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size,
							  gfp));
}
EXPORT_SYMBOL(page_pool_alloc_frag);
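
/* Illustrative sketch (not part of the original file): sub-page allocation for
 * small RX buffers; the pool hands out aligned fragments of one page and the
 * page is recycled once every fragment has been returned. "rxq" and "rx_desc"
 * are hypothetical driver structures.
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_alloc_frag(rxq->page_pool, &offset, 2048, GFP_ATOMIC);
 *	if (!page)
 *		return -ENOMEM;
 *	rx_desc->addr = page_pool_get_dma_addr(page) + offset;
 */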
static void page_pool_empty_ring(struct page_pool *pool)
{
	netmem_ref netmem;

	/* Empty recycle ring */
	while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(netmem_ref_count(netmem) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, netmem_ref_count(netmem));

		page_pool_return_page(pool, netmem);
	}
}
static void __page_pool_destroy(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	page_pool_unlist(pool);
	page_pool_uninit(pool);

	if (pool->mp_priv) {
		mp_dmabuf_devmem_destroy(pool);
		static_branch_dec(&page_pool_mem_providers);
	}

	kfree(pool);
}
static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	netmem_ref netmem;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no-longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, netmem);
	}
}
static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}
static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool, true);
	if (!inflight)
		__page_pool_destroy(pool);

	return inflight;
}
static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	void *netdev;
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning for page pools the user can't see */
	netdev = READ_ONCE(pool->slow.netdev);
	if (time_after_eq(jiffies, pool->defer_warn) &&
	    (!netdev || netdev == NET_PTR_POISON)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
			__func__, pool->user.id, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   const struct xdp_mem_info *mem)
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
	pool->xdp_mem_id = mem->id;
}
void page_pool_disable_direct_recycling(struct page_pool *pool)
{
	/* Disable direct recycling based on pool->cpuid.
	 * Paired with READ_ONCE() in page_pool_napi_local().
	 */
	WRITE_ONCE(pool->cpuid, -1);

	if (!pool->p.napi)
		return;

	/* To avoid races with recycling and additional barriers make sure
	 * pool and NAPI are unlinked when NAPI is disabled.
	 */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state));
	WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1);

	mutex_lock(&page_pools_lock);
	WRITE_ONCE(pool->p.napi, NULL);
	mutex_unlock(&page_pools_lock);
}
EXPORT_SYMBOL(page_pool_disable_direct_recycling);
void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_disable_direct_recycling(pool);
	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	page_pool_detached(pool);
	pool->defer_start = jiffies;
	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
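
/* Illustrative note (not part of the original file): a driver tearing down an
 * RX queue would typically stop the queue and disable its NAPI instance before
 * calling page_pool_destroy(), so no new allocations or direct recycles can
 * race with the shutdown; any remaining inflight pages are then reclaimed by
 * the deferred release work above.
 */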
/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	netmem_ref netmem;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, netmem);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);