/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
9 #include <linux/kernel.h>
10 #include <linux/errno.h>
11 #include <linux/pci.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/if_vlan.h>
15 #include <linux/bpf.h>
16 #include <linux/bpf_trace.h>
17 #include <linux/filter.h>
22 void bnxt_xmit_xdp(struct bnxt
*bp
, struct bnxt_tx_ring_info
*txr
,
23 dma_addr_t mapping
, u32 len
, u16 rx_prod
)
25 struct bnxt_sw_tx_bd
*tx_buf
;
31 tx_buf
= &txr
->tx_buf_ring
[prod
];
32 tx_buf
->rx_prod
= rx_prod
;
34 txbd
= &txr
->tx_desc_ring
[TX_RING(prod
)][TX_IDX(prod
)];
35 flags
= (len
<< TX_BD_LEN_SHIFT
) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT
) |
36 TX_BD_FLAGS_PACKET_END
| bnxt_lhint_arr
[len
>> 9];
37 txbd
->tx_bd_len_flags_type
= cpu_to_le32(flags
);
38 txbd
->tx_bd_opaque
= prod
;
39 txbd
->tx_bd_haddr
= cpu_to_le64(mapping
);
45 void bnxt_tx_int_xdp(struct bnxt
*bp
, struct bnxt_napi
*bnapi
, int nr_pkts
)
47 struct bnxt_tx_ring_info
*txr
= bnapi
->tx_ring
;
48 struct bnxt_rx_ring_info
*rxr
= bnapi
->rx_ring
;
49 struct bnxt_sw_tx_bd
*tx_buf
;
50 u16 tx_cons
= txr
->tx_cons
;
51 u16 last_tx_cons
= tx_cons
;
55 for (i
= 0; i
< nr_pkts
; i
++) {
56 last_tx_cons
= tx_cons
;
57 tx_cons
= NEXT_TX(tx_cons
);
59 txr
->tx_cons
= tx_cons
;
60 if (bnxt_tx_avail(bp
, txr
) == bp
->tx_ring_size
) {
61 rx_prod
= rxr
->rx_prod
;
63 tx_buf
= &txr
->tx_buf_ring
[last_tx_cons
];
64 rx_prod
= tx_buf
->rx_prod
;
66 bnxt_db_write(bp
, rxr
->rx_doorbell
, DB_KEY_RX
| rx_prod
);
69 /* returns the following:
70 * true - packet consumed by XDP and new buffer is allocated.
71 * false - packet should be passed to the stack.
73 bool bnxt_rx_xdp(struct bnxt
*bp
, struct bnxt_rx_ring_info
*rxr
, u16 cons
,
74 struct page
*page
, u8
**data_ptr
, unsigned int *len
, u8
*event
)
76 struct bpf_prog
*xdp_prog
= READ_ONCE(rxr
->xdp_prog
);
77 struct bnxt_tx_ring_info
*txr
;
78 struct bnxt_sw_rx_bd
*rx_buf
;
91 txr
= rxr
->bnapi
->tx_ring
;
92 rx_buf
= &rxr
->rx_buf_ring
[cons
];
93 offset
= bp
->rx_offset
;
95 xdp
.data_hard_start
= *data_ptr
- offset
;
97 xdp_set_data_meta_invalid(&xdp
);
98 xdp
.data_end
= *data_ptr
+ *len
;
99 xdp
.rxq
= &rxr
->xdp_rxq
;
100 orig_data
= xdp
.data
;
101 mapping
= rx_buf
->mapping
- bp
->rx_dma_offset
;
103 dma_sync_single_for_cpu(&pdev
->dev
, mapping
+ offset
, *len
, bp
->rx_dir
);
106 act
= bpf_prog_run_xdp(xdp_prog
, &xdp
);
109 tx_avail
= bnxt_tx_avail(bp
, txr
);
110 /* If the tx ring is not full, we must not update the rx producer yet
111 * because we may still be transmitting on some BDs.
113 if (tx_avail
!= bp
->tx_ring_size
)
114 *event
&= ~BNXT_RX_EVENT
;
116 *len
= xdp
.data_end
- xdp
.data
;
117 if (orig_data
!= xdp
.data
) {
118 offset
= xdp
.data
- xdp
.data_hard_start
;
119 *data_ptr
= xdp
.data_hard_start
+ offset
;
127 trace_xdp_exception(bp
->dev
, xdp_prog
, act
);
128 bnxt_reuse_rx_data(rxr
, cons
, page
);
132 *event
= BNXT_TX_EVENT
;
133 dma_sync_single_for_device(&pdev
->dev
, mapping
+ offset
, *len
,
135 bnxt_xmit_xdp(bp
, txr
, mapping
+ offset
, *len
,
136 NEXT_RX(rxr
->rx_prod
));
137 bnxt_reuse_rx_data(rxr
, cons
, page
);
140 bpf_warn_invalid_xdp_action(act
);
143 trace_xdp_exception(bp
->dev
, xdp_prog
, act
);
146 bnxt_reuse_rx_data(rxr
, cons
, page
);
152 /* Under rtnl_lock */
153 static int bnxt_xdp_set(struct bnxt
*bp
, struct bpf_prog
*prog
)
155 struct net_device
*dev
= bp
->dev
;
156 int tx_xdp
= 0, rc
, tc
;
157 struct bpf_prog
*old
;
159 if (prog
&& bp
->dev
->mtu
> BNXT_MAX_PAGE_MODE_MTU
) {
160 netdev_warn(dev
, "MTU %d larger than largest XDP supported MTU %d.\n",
161 bp
->dev
->mtu
, BNXT_MAX_PAGE_MODE_MTU
);
164 if (!(bp
->flags
& BNXT_FLAG_SHARED_RINGS
)) {
165 netdev_warn(dev
, "ethtool rx/tx channels must be combined to support XDP.\n");
169 tx_xdp
= bp
->rx_nr_rings
;
171 tc
= netdev_get_num_tc(dev
);
174 rc
= bnxt_check_rings(bp
, bp
->tx_nr_rings_per_tc
, bp
->rx_nr_rings
,
177 netdev_warn(dev
, "Unable to reserve enough TX rings to support XDP.\n");
180 if (netif_running(dev
))
181 bnxt_close_nic(bp
, true, false);
183 old
= xchg(&bp
->xdp_prog
, prog
);
188 bnxt_set_rx_skb_mode(bp
, true);
192 bnxt_set_rx_skb_mode(bp
, false);
193 bnxt_get_max_rings(bp
, &rx
, &tx
, true);
195 bp
->flags
&= ~BNXT_FLAG_NO_AGG_RINGS
;
196 bp
->dev
->hw_features
|= NETIF_F_LRO
;
199 bp
->tx_nr_rings_xdp
= tx_xdp
;
200 bp
->tx_nr_rings
= bp
->tx_nr_rings_per_tc
* tc
+ tx_xdp
;
201 bp
->cp_nr_rings
= max_t(int, bp
->tx_nr_rings
, bp
->rx_nr_rings
);
202 bp
->num_stat_ctxs
= bp
->cp_nr_rings
;
203 bnxt_set_tpa_flags(bp
);
204 bnxt_set_ring_params(bp
);
206 if (netif_running(dev
))
207 return bnxt_open_nic(bp
, true, false);
212 int bnxt_xdp(struct net_device
*dev
, struct netdev_bpf
*xdp
)
214 struct bnxt
*bp
= netdev_priv(dev
);
217 switch (xdp
->command
) {
219 rc
= bnxt_xdp_set(bp
, xdp
->prog
);
222 xdp
->prog_id
= bp
->xdp_prog
? bp
->xdp_prog
->aux
->id
: 0;