/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};
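
/* Cache of FILL queue addresses that a driver took out of the FILL queue
 * but could not use right away (e.g. when a ring is torn down). Cached
 * addresses are handed out again before new ones are peeked from the
 * FILL queue; see the *_rq helpers below.
 */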
struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

struct xdp_umem {
	struct xsk_queue *fq;		/* FILL queue */
	struct xsk_queue *cq;		/* COMPLETION queue */
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;		/* chunk size without headroom */
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;			/* zero-copy mode enabled */
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};
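
/* An AF_XDP socket; struct sock is embedded as the first member so that
 * generic socket code can operate on it. The TX queue pointer below is
 * annotated ____cacheline_aligned_in_smp so the TX side starts on its
 * own cacheline, keeping the RX and TX hot paths from sharing state.
 */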
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
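
/* Illustrative TX flow (not part of this header): a zero-copy driver
 * typically drains descriptors with xsk_umem_consume_tx(), posts them to
 * hardware, and completes them once DMA has finished. A rough sketch,
 * with the hardware hand-off elided:
 *
 *	struct xdp_desc desc;
 *
 *	while (xsk_umem_consume_tx(umem, &desc)) {
 *		dma_addr_t dma = xdp_umem_get_dma(umem, desc.addr);
 *
 *		(post dma + desc.len to the HW TX ring here)
 *	}
 *	xsk_umem_consume_tx_done(umem);
 *
 *	(later, once the hardware has sent nb_entries frames)
 *	xsk_umem_complete_tx(umem, nb_entries);
 */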

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}
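
/* A umem address is a 64-bit offset into the umem: the page index is
 * addr >> PAGE_SHIFT and the offset within that page is
 * addr & (PAGE_SIZE - 1). With 4 KiB pages, addr 0x2010 resolves to
 * pages[2] at offset 0x10.
 */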

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
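
/* Illustrative FILL queue consumption (not part of this header): a driver
 * refilling its RX ring with the reuse-queue aware helpers might look
 * roughly like this, with the buffer programming elided:
 *
 *	u64 addr;
 *
 *	while (xsk_umem_has_addrs_rq(umem, 1)) {
 *		if (!xsk_umem_peek_addr_rq(umem, &addr))
 *			break;
 *		(program xdp_umem_get_dma(umem, addr) into the HW RX ring)
 *		xsk_umem_discard_addr_rq(umem);
 *	}
 *
 * Addresses that could not be used can be stashed with
 * xsk_umem_fq_reuse(umem, addr) so they are handed out first next time.
 */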

#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */