/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
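
/* Fill the next TX buffer descriptor for a single-fragment packet of the
 * given DMA address and length, and advance the TX producer index.
 * Returns the software tx_buf entry so the caller can record per-frame
 * state (XDP action, rx_prod, xdp_frame pointer).
 */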
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len)
{
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd *txbd;
	u32 flags;
	u16 prod;

	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
	return tx_buf;
}
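
/* Queue an XDP_TX frame: the RX buffer is transmitted in place, and the
 * saved rx_prod lets the RX doorbell be rung once transmission completes.
 */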
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
	tx_buf->rx_prod = rx_prod;
	tx_buf->action = XDP_TX;
}
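
/* Queue a redirected xdp_frame for transmission; the frame pointer and DMA
 * unmap info are saved so the completion path can unmap and free it.
 */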
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, 0);
}
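
/* TX completion handling for the XDP TX ring: unmap and free redirected
 * frames, and for XDP_TX completions ring the RX doorbell so the recycled
 * buffers are handed back to the hardware.
 */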
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	bool rx_doorbell_needed = false;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	int i;

	for (i = 0; i < nr_pkts; i++) {
		tx_buf = &txr->tx_buf_ring[tx_cons];

		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			rx_doorbell_needed = true;
			last_tx_cons = tx_cons;
		}
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (rx_doorbell_needed) {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	}
}
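
/* Run the ring's attached XDP program on a received buffer and act on the
 * verdict: XDP_TX frames are queued on the dedicated XDP TX ring,
 * XDP_REDIRECT frames are handed to xdp_do_redirect(), and aborted or
 * dropped frames reuse the RX buffer in place.
 */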
/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	struct xdp_buff xdp;
	dma_addr_t mapping;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

	txr = rxr->bnapi->tx_ring;
	xdp.data_hard_start = *data_ptr - offset;
	xdp.data = *data_ptr;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = *data_ptr + *len;
	xdp.rxq = &rxr->xdp_rxq;
	xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
	orig_data = xdp.data;

	rcu_read_lock();
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the tx ring is not full, we must not update the rx producer yet
	 * because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp.data_end - xdp.data;
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
	}
	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		if (tx_avail < 1) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		*event = BNXT_TX_EVENT;
		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod));
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	case XDP_REDIRECT:
		/* if we are calling this here then we know that the
		 * redirect is coming from a frame received by the
		 * bnxt_en driver.
		 */
		dma_unmap_page_attrs(&pdev->dev, mapping,
				     PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		/* if we are unable to allocate a new buffer, abort and reuse */
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			return true;
		}

		*event |= BNXT_REDIRECT_EVENT;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* Fall thru */
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		/* Fall thru */
	case XDP_DROP:
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}
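
/* Transmit a batch of redirected XDP frames (ndo_xdp_xmit): map each frame
 * for DMA, queue it on this CPU's XDP TX ring, and ring the TX doorbell
 * when XDP_XMIT_FLUSH is set.  Returns the number of frames queued.
 */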
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	dma_addr_t mapping;
	int drops = 0;
	int ring;
	int i;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp ||
	    !xdp_prog)
		return -EINVAL;

	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdp = frames[i];

		if (!txr || !bnxt_tx_avail(bp, txr) ||
		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
			xdp_return_frame_rx_napi(xdp);
			drops++;
			continue;
		}

		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, mapping)) {
			xdp_return_frame_rx_napi(xdp);
			drops++;
			continue;
		}
		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Sync BD data before updating doorbell */
		wmb();
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	}

	return num_frames - drops;
}
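
/* Attach or detach an XDP program.  Attaching reserves one XDP TX ring per
 * RX ring, switches the device to single-page RX buffer mode and reopens
 * the NIC with the new ring layout; detaching restores the defaults.
 */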
/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
	} else {
		int rx, tx;

		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
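
/* ndo_bpf dispatch: handle netdev_bpf commands, currently XDP program
 * setup via bnxt_xdp_set().
 */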
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);