/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
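
/* XDP support for the bnxt_en driver: buffer-descriptor setup for XDP_TX and
 * XDP_REDIRECT transmits, XDP TX completion handling, the RX-path XDP hook,
 * the ndo_xdp_xmit implementation and XDP program setup via ndo_bpf.
 */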

struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len)
{
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd *txbd;
	u32 flags;
	u16 prod;

	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
	return tx_buf;
}
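
/* Queue an XDP_TX packet on the XDP TX ring.  The packet reuses the page it
 * was received into, so remember the RX producer index that must be given
 * back to the hardware once this transmit completes.
 */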
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
	tx_buf->rx_prod = rx_prod;
	tx_buf->action = XDP_TX;
}
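
/* Queue a frame handed to us by ndo_xdp_xmit (an XDP_REDIRECT from another
 * device).  The caller has already DMA mapped the frame, so save the unmap
 * address and the xdp_frame pointer for release at TX completion time.
 */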
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, 0);
}
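
/* TX completion handler for the XDP TX ring: unmap and free frames that were
 * redirected to us, and for XDP_TX packets ring the RX doorbell so that the
 * recycled RX buffers are returned to the hardware.
 */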
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	bool rx_doorbell_needed = false;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	int i;

	for (i = 0; i < nr_pkts; i++) {
		tx_buf = &txr->tx_buf_ring[tx_cons];

		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			rx_doorbell_needed = true;
			last_tx_cons = tx_cons;
		}
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (rx_doorbell_needed) {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	}
}

/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	struct xdp_buff xdp;
	dma_addr_t mapping;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

	txr = rxr->bnapi->tx_ring;
	xdp.data_hard_start = *data_ptr - offset;
	xdp.data = *data_ptr;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = *data_ptr + *len;
	xdp.rxq = &rxr->xdp_rxq;
	orig_data = xdp.data;

	rcu_read_lock();
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the tx ring is not full, we must not update the rx producer yet
	 * because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp.data_end - xdp.data;
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
	}
	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		if (tx_avail < 1) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		*event = BNXT_TX_EVENT;
		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod));
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	case XDP_REDIRECT:
		/* if we are calling this here then we know that the
		 * redirect is coming from a frame received by the
		 * bnxt_en driver.
		 */
		dma_unmap_page_attrs(&pdev->dev, mapping,
				     PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		/* if we are unable to allocate a new buffer, abort and reuse */
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			return true;
		}

		*event |= BNXT_REDIRECT_EVENT;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}
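
/* ndo_xdp_xmit handler: transmit frames redirected to this device by XDP
 * programs running elsewhere.  A TX ring is picked based on the current CPU,
 * frames are DMA mapped and queued, and the doorbell is only written when
 * XDP_XMIT_FLUSH is set.
 */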
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	dma_addr_t mapping;
	int drops = 0;
	int ring;
	int i;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp ||
	    !xdp_prog)
		return -EINVAL;

	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdp = frames[i];

		if (!txr || !bnxt_tx_avail(bp, txr) ||
		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
			xdp_return_frame_rx_napi(xdp);
			drops++;
			continue;
		}

		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, mapping)) {
			xdp_return_frame_rx_napi(xdp);
			drops++;
			continue;
		}
		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Sync BD data before updating doorbell */
		wmb();
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	}

	return num_frames - drops;
}

/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
	} else {
		int rx, tx;

		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
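
/* ndo_bpf handler: install an XDP program or report the id of the one that
 * is currently attached.
 */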
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	case XDP_QUERY_PROG:
		xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
		rc = 0;
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}