/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)

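/* Illustrative sketch (not part of this header): because umem->pages[i].addr
 * is page aligned, its low bits are free to carry flags. Bit 0 hints that the
 * next page is physically contiguous, so a frame in unaligned mode may safely
 * straddle the page boundary, e.g.:
 *
 *        bool contig = (unsigned long)umem->pages[i].addr & XSK_NEXT_PG_CONTIG_MASK;
 *        void *page = (void *)((unsigned long)umem->pages[i].addr & PAGE_MASK);
 */
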
struct xdp_umem_page {
        void *addr;
        dma_addr_t dma;
};

struct xdp_umem_fq_reuse {
        u32 nentries;
        u32 length;
        u64 handles[];
};

/* Flags for the umem flags field.
 *
 * The NEED_WAKEUP flag is (1 << 1) because the flags field is also used for
 * the public flags, which already claim bit 0. See include/uapi/linux/if_xdp.h.
 */
#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)

struct xdp_umem {
        struct xdp_umem_page *pages;
        struct user_struct *user;
        unsigned long address;
        struct work_struct work;
        u8 flags;
        struct net_device *dev;
        struct xdp_umem_fq_reuse *fq_reuse;
        spinlock_t xsk_list_lock;
        struct list_head xsk_list;
};

/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */

struct xsk_map {
        struct bpf_map map;
        spinlock_t lock; /* Synchronize map updates */
        struct xdp_sock *xsk_map[];
};

struct xsk_map_node {
        struct list_head node;
        struct xsk_map *map;
        struct xdp_sock **map_entry;
};

struct xdp_sock {
        /* struct sock must be the first member of struct xdp_sock */
        struct sock sk;
        struct net_device *dev;
        struct xdp_umem *umem;
        struct list_head flush_node;
        /* Protects multiple processes in the control path */
        struct mutex mutex;
        struct xsk_queue *tx ____cacheline_aligned_in_smp;
        struct list_head list;
        /* Mutual exclusion of NAPI TX thread and sendmsg error paths
         * in the SKB destructor callback.
         */
        spinlock_t tx_completion_lock;
        /* Protects generic receive. */
        spinlock_t rx_lock;
        struct list_head map_list;
        /* Protects map_list */
        spinlock_t map_list_lock;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_release_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
                                          struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);

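/* Illustrative driver-side Tx sketch (hypothetical driver code, not part of
 * this header): pull descriptors from the Tx ring and, when the application
 * must kick Tx via sendmsg(), say so through the need_wakeup flag:
 *
 *        struct xdp_desc desc;
 *
 *        while (budget-- && xsk_umem_consume_tx(umem, &desc))
 *                queue_frame_to_hw(xdp_umem_get_dma(umem, desc.addr), desc.len);
 *        xsk_umem_consume_tx_done(umem);
 *
 *        if (xsk_umem_uses_need_wakeup(umem))
 *                xsk_set_tx_need_wakeup(umem);
 *
 * queue_frame_to_hw() and budget are placeholders; once the hardware has sent
 * n frames, their addresses go back to the completion ring via
 * xsk_umem_complete_tx(umem, n).
 */
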
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
                             struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
                                                     u32 key)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        struct xdp_sock *xs;

        if (key >= map->max_entries)
                return NULL;

        xs = READ_ONCE(m->xsk_map[key]);
        return xs;
}

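/* Illustrative BPF program side (sketch, not defined in this header): an XDP
 * program usually redirects into an XSKMAP keyed by rx queue index, which is
 * resolved through the lookup above; xsks_map is just an example map name:
 *
 *        return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
 */
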
static inline u64 xsk_umem_extract_addr(u64 addr)
{
        return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
        return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
        return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
}

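/* Worked example (illustrative, using the uapi encoding from
 * include/uapi/linux/if_xdp.h where the offset sits in the upper 16 bits):
 *
 *        u64 addr = (0x100ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x2000;
 *
 *        xsk_umem_extract_addr(addr)       == 0x2000  (base chunk address)
 *        xsk_umem_extract_offset(addr)     == 0x100   (offset into the chunk)
 *        xsk_umem_add_offset_to_addr(addr) == 0x2100  (plain address again)
 */
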
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
        unsigned long page_addr;

        addr = xsk_umem_add_offset_to_addr(addr);
        page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;

        return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
        addr = xsk_umem_add_offset_to_addr(addr);

        return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
}

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
        struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

        if (rq->length >= cnt)
                return true;

        return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
        struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

        if (!rq->length)
                return xsk_umem_peek_addr(umem, addr);

        *addr = rq->handles[rq->length - 1];
        return true;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
        struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

        if (!rq->length)
                xsk_umem_release_addr(umem);
        else
                rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
        struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

        rq->handles[rq->length++] = addr;
}

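/* Illustrative driver-side Rx fill sketch (hypothetical driver code, not part
 * of this header): addresses are peeked from the FILL ring or the reuse queue
 * and posted to hardware; give_buffer_to_hw() and free_hw_slots are
 * placeholders:
 *
 *        u64 addr;
 *
 *        while (free_hw_slots && xsk_umem_peek_addr_rq(umem, &addr)) {
 *                give_buffer_to_hw(xdp_umem_get_dma(umem, addr));
 *                xsk_umem_release_addr_rq(umem);
 *                free_hw_slots--;
 *        }
 *
 * On teardown, an address still owned by the driver is recycled rather than
 * lost: xsk_umem_fq_reuse(umem, addr);
 */
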
/* Handle the offset appropriately depending on aligned or unaligned mode.
 * For unaligned mode, we store the offset in the upper 16 bits of the address.
 * For aligned mode, we simply add the offset to the address.
 */
static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
                                         u64 offset)
{
        if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
                return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);

        return address + offset;
}

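/* Worked example (illustrative): applying offset 0x100 to a chunk at address
 * 0x2000:
 *
 *        aligned mode:   xsk_umem_adjust_offset(umem, 0x2000, 0x100) == 0x2100
 *        unaligned mode: xsk_umem_adjust_offset(umem, 0x2000, 0x100)
 *                        == 0x2000 | (0x100ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT)
 *
 * so in unaligned mode the original chunk address remains recoverable via
 * xsk_umem_extract_addr().
 */
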
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)

static inline bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)

static inline void xsk_umem_release_addr(struct xdp_umem *umem)

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
                                       struct xdp_desc *desc)

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
        struct xdp_umem *umem,
        struct xdp_umem_fq_reuse *newq)

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
                                                     u16 queue_id)

static inline u64 xsk_umem_extract_addr(u64 addr)

static inline u64 xsk_umem_extract_offset(u64 addr)

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)

static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)

static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
                                         u64 offset)

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)

static inline void __xsk_map_flush(void)

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
                                                     u32 key)

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */