/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUNETH_TXRX_H
#define _FUNETH_TXRX_H

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

/* Tx descriptor size */
#define FUNETH_SQE_SIZE 64U

/* Size of device headers per Tx packet */
#define FUNETH_FUNOS_HDR_SZ (sizeof(struct fun_eth_tx_req))

/* Number of gather list entries per Tx descriptor */
#define FUNETH_GLE_PER_DESC (FUNETH_SQE_SIZE / sizeof(struct fun_dataop_gl))

/* Max gather list size in bytes for an sk_buff. */
#define FUNETH_MAX_GL_SZ ((MAX_SKB_FRAGS + 1) * sizeof(struct fun_dataop_gl))

#if IS_ENABLED(CONFIG_TLS_DEVICE)
# define FUNETH_TLS_SZ sizeof(struct fun_eth_tls)
#else
# define FUNETH_TLS_SZ 0
#endif

/* Max number of Tx descriptors for an sk_buff using a gather list. */
#define FUNETH_MAX_GL_DESC \
	DIV_ROUND_UP((FUNETH_FUNOS_HDR_SZ + FUNETH_MAX_GL_SZ + FUNETH_TLS_SZ), \
		     FUNETH_SQE_SIZE)

/* Max number of Tx descriptors for any packet. */
#define FUNETH_MAX_PKT_DESC FUNETH_MAX_GL_DESC
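
/* Worked example of the bound above, under assumed sizes (the real values
 * come from fun_hci.h, not this header): with MAX_SKB_FRAGS == 17, a 16-byte
 * struct fun_dataop_gl and a 32-byte struct fun_eth_tx_req, the gather list
 * is (17 + 1) * 16 = 288 bytes, so a worst-case sk_buff without TLS spans
 * DIV_ROUND_UP(32 + 288, 64) = 5 SQEs.
 */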

/* Rx CQ descriptor size. */
#define FUNETH_CQE_SIZE 64U

/* Offset of cqe_info within a CQE. */
#define FUNETH_CQE_INFO_OFFSET (FUNETH_CQE_SIZE - sizeof(struct fun_cqe_info))
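
/* Sketch of how the offset is meant to be used (illustrative, not a quote of
 * the driver): each 64B CQE carries its fun_cqe_info at the tail, so the
 * info of CQE index i in a ring is
 *
 *	const void *info = q->cqes +
 *			   (i & q->cq_mask) * FUNETH_CQE_SIZE +
 *			   FUNETH_CQE_INFO_OFFSET;
 */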

/* Construct the IRQ portion of a CQ doorbell. The resulting value arms the
 * interrupt with the supplied time delay and packet count moderation settings.
 */
#define FUN_IRQ_CQ_DB(usec, pkts) \
	(FUN_DB_IRQ_ARM_F | ((usec) << FUN_DB_INTCOAL_USEC_S) | \
	 ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))

/* As above for SQ doorbells. */
#define FUN_IRQ_SQ_DB(usec, pkts) \
	(FUN_DB_IRQ_ARM_F | \
	 ((usec) << FUN_DB_INTCOAL_USEC_S) | \
	 ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))
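
/* Illustrative use (the 8 usec / 16 packet moderation values are arbitrary
 * examples, not driver defaults): re-arm a CQ interrupt so it fires after
 * 8 usecs or 16 new completions, whichever comes first:
 *
 *	writel(FUN_IRQ_CQ_DB(8, 16), q->cq_db);
 */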

/* Per packet tailroom. Present only for 1-frag packets. */
#define FUN_RX_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* Per packet headroom for XDP. Preferred over XDP_PACKET_HEADROOM to
 * accommodate two packets per buffer for 4K pages and 1500B MTUs.
 */
#define FUN_XDP_HEADROOM 192
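
/* Rough budget behind the 192B choice (sizes are x86-64 assumptions): 192B
 * headroom + 1518B worst-case 1500-MTU frame + ~320B of aligned
 * skb_shared_info tailroom is about 2030B, so two packets fit in a 4KB page;
 * with the 256B XDP_PACKET_HEADROOM the sum exceeds 2048B and they would not.
 */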

/* Initialization state of a queue. */
enum {
	FUN_QSTATE_DESTROYED, /* what queue? */
	FUN_QSTATE_INIT_SW,   /* exists in SW, not on the device */
	FUN_QSTATE_INIT_FULL, /* exists both in SW and on device */
};

/* Initialization state of an interrupt. */
enum {
	FUN_IRQ_INIT,      /* initialized and in the XArray but inactive */
	FUN_IRQ_REQUESTED, /* request_irq() done */
	FUN_IRQ_ENABLED,   /* processing enabled */
	FUN_IRQ_DISABLED,  /* processing disabled */
};

struct funeth_txq_stats {    /* per Tx queue SW counters */
	u64 tx_pkts;         /* # of Tx packets */
	u64 tx_bytes;        /* total bytes of Tx packets */
	u64 tx_cso;          /* # of packets with checksum offload */
	u64 tx_tso;          /* # of non-encapsulated TSO super-packets */
	u64 tx_encap_tso;    /* # of encapsulated TSO super-packets */
	u64 tx_uso;          /* # of non-encapsulated UDP LSO super-packets */
	u64 tx_more;         /* # of DBs elided due to xmit_more */
	u64 tx_nstops;       /* # of times the queue has stopped */
	u64 tx_nrestarts;    /* # of times the queue has restarted */
	u64 tx_map_err;      /* # of packets dropped due to DMA mapping errors */
	u64 tx_xdp_full;     /* # of XDP packets that could not be enqueued */
	u64 tx_tls_pkts;     /* # of Tx TLS packets offloaded to HW */
	u64 tx_tls_bytes;    /* Tx bytes of HW-handled TLS payload */
	u64 tx_tls_fallback; /* attempted Tx TLS offloads punted to SW */
	u64 tx_tls_drops;    /* attempted Tx TLS offloads dropped */
};

struct funeth_tx_info {         /* per Tx descriptor state */
	union {
		struct sk_buff *skb;    /* associated packet (sk_buff path) */
		struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */
	};
};

struct funeth_txq {
	/* RO cacheline of frequently accessed data */
	u32 mask;               /* queue depth - 1 */
	u32 hw_qid;             /* device ID of the queue */
	void *desc;             /* base address of descriptor ring */
	struct funeth_tx_info *info;
	struct device *dma_dev; /* device for DMA mappings */
	volatile __be64 *hw_wb; /* HW write-back location */
	u32 __iomem *db;        /* SQ doorbell register address */
	struct netdev_queue *ndq;
	dma_addr_t dma_addr;    /* DMA address of descriptor ring */
	/* producer R/W cacheline */
	u16 qidx;               /* queue index within net_device */
	u32 prod_cnt;           /* producer counter */
	struct funeth_txq_stats stats;
	/* shared R/W cacheline, primarily accessed by consumer */
	u32 irq_db_val;         /* value written to IRQ doorbell */
	u32 cons_cnt;           /* consumer (cleanup) counter */
	struct net_device *netdev;
	u8 init_state;          /* queue initialization state */
	struct u64_stats_sync syncp;
};
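
/* Illustrative invariants (a sketch of the usual free-running ring idiom,
 * not a quote of the driver): SQ occupancy is the counter difference, and
 * counters are reduced by mask only when indexing the ring:
 *
 *	unsigned int in_flight = q->prod_cnt - q->cons_cnt;
 *	void *next = fun_tx_desc_addr(q, q->prod_cnt & q->mask);
 */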

struct funeth_rxq_stats {  /* per Rx queue SW counters */
	u64 rx_pkts;       /* # of received packets, including SW drops */
	u64 rx_bytes;      /* total size of received packets */
	u64 rx_cso;        /* # of packets with checksum offload */
	u64 rx_bufs;       /* total # of Rx buffers provided to device */
	u64 gro_pkts;      /* # of GRO superpackets */
	u64 gro_merged;    /* # of pkts merged into existing GRO superpackets */
	u64 rx_page_alloc; /* # of page allocations for Rx buffers */
	u64 rx_budget;     /* NAPI iterations that exhausted their budget */
	u64 rx_mem_drops;  /* # of packets dropped due to memory shortage */
	u64 rx_map_err;    /* # of page DMA mapping errors */
	u64 xdp_drops;     /* XDP_DROPped packets */
	u64 xdp_tx;        /* successful XDP transmits */
	u64 xdp_redir;     /* successful XDP redirects */
	u64 xdp_err;       /* packets dropped due to XDP errors */
};

struct funeth_rxbuf {        /* per Rx buffer state */
	struct page *page;   /* associated page */
	dma_addr_t dma_addr; /* DMA address of page start */
	int pg_refs;         /* page refs held by driver */
	int node;            /* page node, or -1 if it is PF_MEMALLOC */
};

struct funeth_rx_cache {           /* cache of DMA-mapped previously used buffers */
	struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
	unsigned int prod_cnt;     /* producer counter */
	unsigned int cons_cnt;     /* consumer counter */
	unsigned int mask;         /* depth - 1 */
};
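
/* Illustrative access pattern (the usual power-of-2 ring idiom, assumed
 * rather than quoted from the driver): counters run free and are masked
 * only when indexing, with emptiness meaning equal counters:
 *
 *	struct funeth_rxbuf *b = &c->bufs[c->cons_cnt & c->mask];
 *	bool empty = c->prod_cnt == c->cons_cnt;
 */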

/* An Rx queue consists of a CQ and an SQ used to provide Rx buffers. */
struct funeth_rxq {
	struct net_device *netdev;
	struct napi_struct *napi;
	struct device *dma_dev;       /* device for DMA mappings */
	void *cqes;                   /* base of CQ descriptor ring */
	const void *next_cqe_info;    /* fun_cqe_info of next CQE */
	u32 __iomem *cq_db;           /* CQ doorbell register address */
	unsigned int cq_head;         /* CQ head index */
	unsigned int cq_mask;         /* CQ depth - 1 */
	u16 phase;                    /* CQ phase tag */
	u16 qidx;                     /* queue index within net_device */
	unsigned int irq_db_val;      /* IRQ info for CQ doorbell */
	struct fun_eprq_rqbuf *rqes;  /* base of RQ descriptor ring */
	struct funeth_rxbuf *bufs;    /* base of Rx buffer state ring */
	struct funeth_rxbuf *cur_buf; /* currently active buffer */
	u32 __iomem *rq_db;           /* RQ doorbell register address */
	unsigned int rq_cons;         /* RQ consumer counter */
	unsigned int rq_mask;         /* RQ depth - 1 */
	unsigned int buf_offset;      /* offset of next pkt in head buffer */
	u8 xdp_flush;                 /* XDP flush types needed at NAPI end */
	u8 init_state;                /* queue initialization state */
	u16 headroom;                 /* per packet headroom */
	unsigned int rq_cons_db;      /* value of rq_cons at last RQ db */
	unsigned int rq_db_thres;     /* # of new buffers needed to write RQ db */
	struct funeth_rxbuf spare_buf; /* spare for next buffer replacement */
	struct funeth_rx_cache cache; /* used buffer cache */
	struct bpf_prog *xdp_prog;    /* optional XDP BPF program */
	struct funeth_rxq_stats stats;
	dma_addr_t cq_dma_addr;       /* DMA address of CQE ring */
	dma_addr_t rq_dma_addr;       /* DMA address of RQE ring */
	u32 hw_cqid;                  /* device ID of the queue's CQ */
	u32 hw_sqid;                  /* device ID of the queue's SQ */
	struct u64_stats_sync syncp;
	struct xdp_rxq_info xdp_rxq;
};

#define FUN_QSTAT_INC(q, counter) \
	do { \
		u64_stats_update_begin(&(q)->syncp); \
		(q)->stats.counter++; \
		u64_stats_update_end(&(q)->syncp); \
	} while (0)

#define FUN_QSTAT_READ(q, seq, stats_copy) \
	do { \
		seq = u64_stats_fetch_begin(&(q)->syncp); \
		stats_copy = (q)->stats; \
	} while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
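
/* Illustrative use of the two macros (a sketch; 'txq', 'seq' and 'snap' are
 * example names, not driver identifiers): the producer bumps a counter under
 * the seqcount while a reader loops until it sees a consistent copy:
 *
 *	FUN_QSTAT_INC(txq, tx_pkts);
 *
 *	unsigned int seq;
 *	struct funeth_txq_stats snap;
 *
 *	FUN_QSTAT_READ(txq, seq, snap);
 */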

#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)

struct fun_irq {
	struct napi_struct napi;
	struct funeth_txq *txq;
	struct funeth_rxq *rxq;
	u16 irq_idx;             /* index of MSI-X interrupt */
	int irq;                 /* Linux IRQ vector */
	cpumask_t affinity_mask; /* IRQ affinity */
	struct irq_affinity_notify aff_notify;
	char name[FUN_INT_NAME_LEN];
} ____cacheline_internodealigned_in_smp;

/* Return the start address of the idx-th Tx descriptor. */
static inline void *fun_tx_desc_addr(const struct funeth_txq *q,
				     unsigned int idx)
{
	return q->desc + idx * FUNETH_SQE_SIZE;
}

static inline void fun_txq_wr_db(const struct funeth_txq *q)
{
	unsigned int tail = q->prod_cnt & q->mask;

	writel(tail, q->db);
}

static inline int fun_irq_node(const struct fun_irq *p)
{
	return cpu_to_mem(cpumask_first(&p->affinity_mask));
}

int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
int fun_txq_napi_poll(struct napi_struct *napi, int budget);
netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
int fun_xdp_xmit_frames(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags);

int funeth_txq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ndesc, struct fun_irq *irq, int state,
		      struct funeth_txq **qp);
int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq);
struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state);
int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
		      int state, struct funeth_rxq **qp);
int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq);
struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state);
int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog);

#endif /* _FUNETH_TXRX_H */