// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/mm.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <net/xdp.h>

#include <net/mana/mana.h>
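
/* Transmit an skb built from an XDP frame: push the Ethernet header back
 * on, send it on the TX queue the skb is already mapped to, and drop it
 * if the transmit does not complete.
 */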
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
{
        u16 txq_idx = skb_get_queue_mapping(skb);
        struct netdev_queue *ndevtxq;
        int rc;

        __skb_push(skb, ETH_HLEN);

        ndevtxq = netdev_get_tx_queue(ndev, txq_idx);
        __netif_tx_lock(ndevtxq, smp_processor_id());

        rc = mana_start_xmit(skb, ndev);

        __netif_tx_unlock(ndevtxq);

        if (dev_xmit_complete(rc))
                return;

        dev_kfree_skb_any(skb);
        ndev->stats.tx_dropped++;
}
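
/* Convert one XDP frame to an skb and hand it to mana_xdp_tx() on the
 * given TX queue.
 */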
static int mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame,
                            u16 q_idx)
{
        struct sk_buff *skb;

        skb = xdp_build_skb_from_frame(frame, ndev);
        if (unlikely(!skb))
                return -ENOMEM;

        skb_set_queue_mapping(skb, q_idx);

        mana_xdp_tx(skb, ndev);

        return 0;
}
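
/* ndo_xdp_xmit handler: transmit a batch of XDP frames on the TX queue
 * chosen for the current CPU, and count the frames actually sent in the
 * per-queue stats.
 */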
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
                  u32 flags)
{
        struct mana_port_context *apc = netdev_priv(ndev);
        struct mana_stats_tx *tx_stats;
        int i, count = 0;
        u16 q_idx;

        if (unlikely(!apc->port_is_up))
                return 0;

        q_idx = smp_processor_id() % ndev->real_num_tx_queues;

        for (i = 0; i < n; i++) {
                if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))
                        break;

                count++;
        }

        tx_stats = &apc->tx_qp[q_idx].txq.stats;

        u64_stats_update_begin(&tx_stats->syncp);
        tx_stats->xdp_xmit += count;
        u64_stats_update_end(&tx_stats->syncp);

        return count;
}
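
/* Run the XDP program attached to the RX queue on one received buffer and
 * return the program's verdict. XDP_REDIRECT is handled here; the caller
 * is expected to act on PASS/TX/DROP.
 */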
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
                 struct xdp_buff *xdp, void *buf_va, uint pkt_len)
{
        struct mana_stats_rx *rx_stats;
        struct bpf_prog *prog;
        u32 act = XDP_PASS;

        rcu_read_lock();
        prog = rcu_dereference(rxq->bpf_prog);

        if (!prog)
                goto out;

        xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
        xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);

        act = bpf_prog_run_xdp(prog, xdp);

        rx_stats = &rxq->stats;

        switch (act) {
        case XDP_PASS:
        case XDP_TX:
        case XDP_DROP:
                break;

        case XDP_REDIRECT:
                rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
                if (!rxq->xdp_rc) {
                        rxq->xdp_flush = true;

                        u64_stats_update_begin(&rx_stats->syncp);
                        rx_stats->packets++;
                        rx_stats->bytes += pkt_len;
                        rx_stats->xdp_redirect++;
                        u64_stats_update_end(&rx_stats->syncp);

                        break;
                }

                fallthrough;

        case XDP_ABORTED:
                trace_xdp_exception(ndev, prog, act);
                break;

        default:
                bpf_warn_invalid_xdp_action(ndev, prog, act);
        }

out:
        rcu_read_unlock();

        return act;
}
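
/* Return the XDP program attached to the port; the caller must hold rtnl. */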
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
{
        ASSERT_RTNL();

        return apc->bpf_prog;
}
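
/* Return the program currently published to the RX channels; every channel
 * carries the same program, so reading queue 0 is sufficient.
 */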
static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc)
{
        return rtnl_dereference(apc->rxqs[0]->bpf_prog);
}

/* Set xdp program on channels */
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog)
{
        struct bpf_prog *old_prog = mana_chn_xdp_get(apc);
        unsigned int num_queues = apc->num_queues;
        int i;

        ASSERT_RTNL();

        if (old_prog == prog)
                return;

        /* Take one reference per queue before publishing the program. */
        if (prog)
                bpf_prog_add(prog, num_queues);

        for (i = 0; i < num_queues; i++)
                rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog);

        /* Drop the per-queue references held on the old program. */
        if (old_prog)
                for (i = 0; i < num_queues; i++)
                        bpf_prog_put(old_prog);
}
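
/* Attach or detach the XDP program on the port, rejecting MTUs that do not
 * fit an XDP buffer, and adjust the advertised max_mtu to match.
 */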
static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
                        struct netlink_ext_ack *extack)
{
        struct mana_port_context *apc = netdev_priv(ndev);
        struct bpf_prog *old_prog;
        struct gdma_context *gc;

        gc = apc->ac->gdma_dev->gdma_context;

        old_prog = mana_xdp_get(apc);

        if (!old_prog && !prog)
                return 0;

        if (prog && ndev->mtu > MANA_XDP_MTU_MAX) {
                netdev_err(ndev, "XDP: mtu:%u too large, mtu_max:%lu\n",
                           ndev->mtu, MANA_XDP_MTU_MAX);
                NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");

                return -EOPNOTSUPP;
        }

        /* One refcnt of the prog is held by the caller already, so
         * don't increase refcnt for this one.
         */
        apc->bpf_prog = prog;

        if (old_prog)
                bpf_prog_put(old_prog);

        if (apc->port_is_up)
                mana_chn_setxdp(apc, prog);

        if (prog)
                ndev->max_mtu = MANA_XDP_MTU_MAX;
        else
                ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;

        return 0;
}
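
/* ndo_bpf handler: dispatch netdev BPF commands; only XDP program setup is
 * supported.
 */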
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
        struct netlink_ext_ack *extack = bpf->extack;

        switch (bpf->command) {
        case XDP_SETUP_PROG:
                return mana_xdp_set(ndev, bpf->prog, extack);

        default:
                return -EOPNOTSUPP;
        }
}