/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

struct xsk_queue;
struct xdp_rxq_info;
struct device;
struct page;

/* Per-buffer state for a buffer owned by an xsk_buff_pool. */
struct xdp_buff_xsk {
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	struct list_head free_list_node;
};

struct xsk_buff_pool {
	struct list_head free_list;
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	u64 chunk_mask;
	u32 dma_pages_cnt;
	bool dma_need_sync;
	struct xdp_buff_xsk *free_heads[];
};
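
/*
 * Summary of how the pool tracks buffers (based on the xp_* implementation
 * in net/xdp/xsk_buff_pool.c): heads holds the per-chunk xdp_buff_xsk state,
 * free_heads[] is used as a stack of currently free buffers on the
 * allocation fast path, and buffers returned via xp_free() are chained on
 * free_list through their free_list_node.
 */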

/* AF_XDP core. */
struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
				u32 chunk_size, u32 headroom, u64 size,
				bool unaligned);
void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_release(struct xdp_buff_xsk *xskb);
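
/*
 * Illustrative sketch only, not part of the API surface: roughly how the
 * AF_XDP core is expected to drive the functions above. The variable names
 * (umem_pgs, npgs, fill_q, etc.) are hypothetical; the real callers live
 * under net/xdp/.
 *
 *	struct xsk_buff_pool *pool;
 *
 *	pool = xp_create(umem_pgs, npgs, chunks, chunk_size, headroom,
 *			 umem_size, unaligned);
 *	if (!pool)
 *		return -ENOMEM;
 *	xp_set_fq(pool, fill_q);
 *	...
 *	xp_destroy(pool);
 */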

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	if (!xskb->pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}
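
/*
 * Illustrative sketch only: a zero-copy driver TX path would typically turn
 * a descriptor address from the TX ring into a DMA address and flush CPU
 * writes before handing the frame to the device. "desc" here is a
 * hypothetical struct xdp_desc pulled from the ring by the driver.
 *
 *	dma_addr_t dma = xp_raw_get_dma(pool, desc->addr);
 *
 *	xp_dma_sync_for_device(pool, dma, desc->len);
 *	(then program dma and desc->len into the hardware TX descriptor)
 */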

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (pool->dma_pages_cnt && cross_pg) {
		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
			 XSK_NEXT_PG_CONTIG_MASK);
	}
	return false;
}
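
/*
 * Worked example (illustrative only): with 4 KiB pages, a descriptor at
 * addr = 0xFFF with len = 2048 gives (addr & (PAGE_SIZE - 1)) + len = 6143,
 * so cross_pg is true. The check above then returns true unless
 * dma_pages[0] has XSK_NEXT_PG_CONTIG_MASK set, i.e. unless pages 0 and 1
 * are known to be contiguous in DMA space.
 */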

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}

#endif /* XSK_BUFF_POOL_H_ */