/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

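/* Deferred-release timing (summary of the logic further down): DEFER_TIME
 * is how long page_pool_release_retry() waits between attempts to free a
 * pool that still has in-flight pages, and DEFER_WARN_INTERVAL rate-limits
 * the "stalled pool shutdown" warning.
 */
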
#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL is for allowing page used for DMA sending,
	 * which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* Driver calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);

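/* Usage sketch: how a driver might create a DMA-mapping pool for one RX
 * queue. This is an illustrative example, not code from this file; the
 * "rxq" structure, pool_size and device pointer are assumptions.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP,
 *		.order		= 0,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= rxq->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *	};
 *
 *	rxq->page_pool = page_pool_create(&pp_params);
 *	if (IS_ERR(rxq->page_pool))
 *		return PTR_ERR(rxq->page_pool);
 */
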
static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Softirq guarantees CPU and thus NUMA node is stable. This
	 * assumes CPU refilling driver RX-ring will also run RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Slower-path: Get pages from locked ring queue */
	spin_lock(&r->consumer_lock);

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fallthrough to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];

	spin_unlock(&r->consumer_lock);
	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	struct page *page;
	gfp_t gfp = _gfp;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* FUTURE development:
	 *
	 * Current slow-path essentially falls back to single page
	 * allocations, which doesn't improve performance. This code
	 * needs bulk allocation support from the page allocator code.
	 */

	/* Cache was empty, do real allocation */
#ifdef CONFIG_NUMA
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
#else
	page = alloc_pages(gfp, pool->p.order);
#endif
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
	 * This mapping is kept for lifetime of page, until leaving pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	page->dma_addr = dma;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

skip_dma_map:
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;

	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* Page just alloc'ed should/must have refcnt 1. */
	return page;
}

/* Use page_pool to replace alloc_pages() API calls, but provide a
 * synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))

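/* Worked example: the u32 subtraction stays correct across wraparound.
 * If hold_cnt has wrapped around to 3 while release_cnt is still
 * 0xfffffffe, then (u32)(3 - 0xfffffffe) == 5, and the (s32) cast keeps
 * the result positive, so inflight == 5. This holds whenever the true
 * distance is below 2^31.
 */
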
static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}

/* Disconnects a page (from a page_pool). API users can have a need
 * to disconnect a page (from a page_pool), to allow it to be used as
 * a regular page (that will eventually be returned to the normal
 * page-allocator via put_page).
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		goto skip_dma_unmap;

	dma = page->dma_addr;

	/* When page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page->dma_addr = 0;
skip_dma_unmap:
	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);

/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	page_pool_release_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;

	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

/* page is NOT reusable when:
 * 1) allocated when system is under some pressure. (page_is_pfmemalloc)
 */
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
{
	return !page_is_pfmemalloc(page);
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->p.max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
		     unsigned int dma_sync_size, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 */
	if (likely(page_ref_count(page) == 1 &&
		   pool_page_reusable(pool, page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_serving_softirq() &&
		    page_pool_recycle_in_cache(page, pool))
			return NULL;

		/* Page found as candidate for recycling */
		return page;
	}

	/* Fallback/non-XDP mode: API user has elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt case,
	 * the DMA is unmapped/released, as the driver is likely doing
	 * refcnt based recycle tricks, meaning another process will be
	 * invoking put_page().
	 */
	/* Do not replace this with page_pool_return_page() */
	page_pool_release_page(pool, page);
	put_page(page);

	return NULL;
}

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct)
{
	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
	if (page && !page_pool_recycle_in_ring(pool, page)) {
		/* Cache full, fallback to free pages */
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_put_page);

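/* Recycling sketch: a hypothetical driver RX path. This is an illustrative
 * example, not code from this file; "rxq" and "buf_len" are assumptions.
 * Passing allow_direct=true is only safe from the softirq (NAPI) context
 * that owns this pool.
 *
 *	page = page_pool_alloc_pages(rxq->page_pool, GFP_ATOMIC);
 *	...
 *	(on XDP_DROP, recycle straight into the alloc-side cache, syncing
 *	 only the region the device may have written)
 *	page_pool_put_page(rxq->page_pool, page, rxq->buf_len, true);
 */
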
/* Caller must not use data area after call, as this function overwrites it */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{
	int i, bulk_len = 0;

	for (i = 0; i < count; i++) {
		struct page *page = virt_to_head_page(data[i]);

		page = __page_pool_put_page(pool, page, -1, false);
		/* Approved for bulk recycling in ptr_ring cache */
		if (page)
			data[bulk_len++] = page;
	}

	if (unlikely(!bulk_len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	page_pool_ring_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, data[i]))
			break; /* ring full */
	}
	page_pool_ring_unlock(pool);

	/* Hopefully all pages were returned into the ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);

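/* Note: the bulk API amortizes the ptr_ring producer lock across many
 * pages. Its expected caller is the XDP core, e.g. xdp_flush_frame_bulk()
 * returning a batch of frames that belong to the same pool.
 */
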
static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);