/*
 * Copyright(c) 2017 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains HFI1 support for VNIC functionality
 */
#include <linux/if_vlan.h>

#define HFI_TX_TIMEOUT_MS 1000

#define HFI1_VNIC_RCV_Q_SIZE 1024

#define HFI1_VNIC_UP 0

static DEFINE_SPINLOCK(vport_cntr_lock);
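
/*
 * setup_vnic_ctxt - prepare a receive context for VNIC use: allocate the
 * RcvHdr queue and eager buffers, then enable the context with the
 * receive control options implied by its capability flags.
 */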
static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops = 0;
	int ret;

	uctxt->do_interrupt = &handle_receive_interrupt;

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		goto done;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto done;

	if (hfi1_rcvhdrtail_kvaddr(uctxt))
		clear_rcvhdrtail(uctxt);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_ENB;

	if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;

	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
done:
	return ret;
}
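
/*
 * allocate_vnic_ctxt - create a dedicated receive context for VNIC
 * traffic, request its MSI-X interrupt and mark it as a VNIC context.
 */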
static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata **vnic_ctxt)
{
	struct hfi1_ctxtdata *uctxt;
	int ret;

	if (dd->flags & HFI1_FROZEN)
		return -EIO;

	ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
		return -ENOMEM;
	}

	uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
			HFI1_CAP_KGET(NODROP_RHQ_FULL) |
			HFI1_CAP_KGET(NODROP_EGR_FULL) |
			HFI1_CAP_KGET(DMA_RTAIL);

	uctxt->is_vnic = true;

	msix_request_rcd_irq(uctxt);

	hfi1_stats.sps_ctxts++;
	dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
	*vnic_ctxt = uctxt;

	return 0;
}
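
/*
 * deallocate_vnic_ctxt - disable a VNIC receive context, release its
 * MSI-X interrupt and per-context state, and free it.
 */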
static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
				 struct hfi1_ctxtdata *uctxt)
{
	dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);

	/* msix_intr will always be > 0, only clean up if this is true */
	if (uctxt->msix_intr)
		msix_free_irq(dd, uctxt->msix_intr);

	uctxt->event_flags = 0;

	hfi1_clear_tids(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	hfi1_stats.sps_ctxts--;

	hfi1_free_ctxt(uctxt);
}
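
/*
 * hfi1_vnic_setup - initialize the per-device VNIC state: the xarray
 * mapping virtual Ethernet switch ids to vports.
 */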
void hfi1_vnic_setup(struct hfi1_devdata *dd)
{
	xa_init(&dd->vnic.vesws);
}

void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
{
	WARN_ON(!xa_empty(&dd->vnic.vesws));
}
#define SUM_GRP_COUNTERS(stats, qstats, x_grp) do {		\
		u64 *src64, *dst64;				\
		for (src64 = &qstats->x_grp.unicast,		\
			dst64 = &stats->x_grp.unicast;		\
			dst64 <= &stats->x_grp.s_1519_max;) {	\
			*dst64++ += *src64++;			\
		}						\
	} while (0)
/* hfi1_vnic_update_stats - update statistics */
static void hfi1_vnic_update_stats(struct hfi1_vnic_vport_info *vinfo,
				   struct opa_vnic_stats *stats)
{
	struct net_device *netdev = vinfo->netdev;
	u8 i;

	/* add tx counters on different queues */
	for (i = 0; i < vinfo->num_tx_q; i++) {
		struct opa_vnic_stats *qstats = &vinfo->stats[i];
		struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats;

		stats->netstats.tx_fifo_errors += qnstats->tx_fifo_errors;
		stats->netstats.tx_carrier_errors += qnstats->tx_carrier_errors;
		stats->tx_drop_state += qstats->tx_drop_state;
		stats->tx_dlid_zero += qstats->tx_dlid_zero;

		SUM_GRP_COUNTERS(stats, qstats, tx_grp);
		stats->netstats.tx_packets += qnstats->tx_packets;
		stats->netstats.tx_bytes += qnstats->tx_bytes;
	}

	/* add rx counters on different queues */
	for (i = 0; i < vinfo->num_rx_q; i++) {
		struct opa_vnic_stats *qstats = &vinfo->stats[i];
		struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats;

		stats->netstats.rx_fifo_errors += qnstats->rx_fifo_errors;
		stats->netstats.rx_nohandler += qnstats->rx_nohandler;
		stats->rx_drop_state += qstats->rx_drop_state;
		stats->rx_oversize += qstats->rx_oversize;
		stats->rx_runt += qstats->rx_runt;

		SUM_GRP_COUNTERS(stats, qstats, rx_grp);
		stats->netstats.rx_packets += qnstats->rx_packets;
		stats->netstats.rx_bytes += qnstats->rx_bytes;
	}

	stats->netstats.tx_errors = stats->netstats.tx_fifo_errors +
				    stats->netstats.tx_carrier_errors +
				    stats->tx_drop_state + stats->tx_dlid_zero;
	stats->netstats.tx_dropped = stats->netstats.tx_errors;

	stats->netstats.rx_errors = stats->netstats.rx_fifo_errors +
				    stats->netstats.rx_nohandler +
				    stats->rx_drop_state + stats->rx_oversize +
				    stats->rx_runt;
	stats->netstats.rx_dropped = stats->netstats.rx_errors;

	netdev->stats.tx_packets = stats->netstats.tx_packets;
	netdev->stats.tx_bytes = stats->netstats.tx_bytes;
	netdev->stats.tx_fifo_errors = stats->netstats.tx_fifo_errors;
	netdev->stats.tx_carrier_errors = stats->netstats.tx_carrier_errors;
	netdev->stats.tx_errors = stats->netstats.tx_errors;
	netdev->stats.tx_dropped = stats->netstats.tx_dropped;

	netdev->stats.rx_packets = stats->netstats.rx_packets;
	netdev->stats.rx_bytes = stats->netstats.rx_bytes;
	netdev->stats.rx_fifo_errors = stats->netstats.rx_fifo_errors;
	netdev->stats.multicast = stats->rx_grp.mcastbcast;
	netdev->stats.rx_length_errors = stats->rx_oversize + stats->rx_runt;
	netdev->stats.rx_errors = stats->netstats.rx_errors;
	netdev->stats.rx_dropped = stats->netstats.rx_dropped;
}
/* update_len_counters - update pkt's len histogram counters */
static inline void update_len_counters(struct opa_vnic_grp_stats *grp,
				       int len)
{
	/* account for 4 byte FCS */
	if (len >= 1515)
		grp->s_1519_max++;
	else if (len >= 1020)
		grp->s_1024_1518++;
	else if (len >= 508)
		grp->s_512_1023++;
	else if (len >= 252)
		grp->s_256_511++;
	else if (len >= 124)
		grp->s_128_255++;
	else if (len >= 61)
		grp->s_65_127++;
	else
		grp->s_64++;
}
/* hfi1_vnic_update_tx_counters - update transmit counters */
static void hfi1_vnic_update_tx_counters(struct hfi1_vnic_vport_info *vinfo,
					 u8 q_idx, struct sk_buff *skb, int err)
{
	struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
	struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
	struct opa_vnic_grp_stats *tx_grp = &stats->tx_grp;
	u16 vlan_tci;

	stats->netstats.tx_packets++;
	stats->netstats.tx_bytes += skb->len + ETH_FCS_LEN;

	update_len_counters(tx_grp, skb->len);

	/* rest of the counts are for good packets only */
	if (unlikely(err))
		return;

	if (is_multicast_ether_addr(mac_hdr->h_dest))
		tx_grp->mcastbcast++;
	else
		tx_grp->unicast++;

	if (!__vlan_get_tag(skb, &vlan_tci))
		tx_grp->vlan++;
	else
		tx_grp->untagged++;
}
/* hfi1_vnic_update_rx_counters - update receive counters */
static void hfi1_vnic_update_rx_counters(struct hfi1_vnic_vport_info *vinfo,
					 u8 q_idx, struct sk_buff *skb, int err)
{
	struct ethhdr *mac_hdr = (struct ethhdr *)skb->data;
	struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
	struct opa_vnic_grp_stats *rx_grp = &stats->rx_grp;
	u16 vlan_tci;

	stats->netstats.rx_packets++;
	stats->netstats.rx_bytes += skb->len + ETH_FCS_LEN;

	update_len_counters(rx_grp, skb->len);

	/* rest of the counts are for good packets only */
	if (unlikely(err))
		return;

	if (is_multicast_ether_addr(mac_hdr->h_dest))
		rx_grp->mcastbcast++;
	else
		rx_grp->unicast++;

	if (!__vlan_get_tag(skb, &vlan_tci))
		rx_grp->vlan++;
	else
		rx_grp->untagged++;
}
/* This function is overloaded for opa_vnic specific implementation */
static void hfi1_vnic_get_stats64(struct net_device *netdev,
				  struct rtnl_link_stats64 *stats)
{
	struct opa_vnic_stats *vstats = (struct opa_vnic_stats *)stats;
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

	hfi1_vnic_update_stats(vinfo, vstats);
}
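
/*
 * create_bypass_pbc - build the Per Buffer Control word for a bypass
 * packet: no HCRC insertion, bypass ICRC, credit return, the requested
 * VL and the packet length in dwords.
 */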
static u64 create_bypass_pbc(u32 vl, u32 dw_len)
{
	u64 pbc;

	pbc = ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| PBC_INSERT_BYPASS_ICRC | PBC_CREDIT_RETURN
		| PBC_PACKET_BYPASS
		| ((vl & PBC_VL_MASK) << PBC_VL_SHIFT)
		| (dw_len & PBC_LENGTH_DWS_MASK) << PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
/* hfi1_vnic_maybe_stop_tx - stop tx queue if required */
static void hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo,
				    u8 q_idx)
{
	netif_stop_subqueue(vinfo->netdev, q_idx);
	if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
		return;

	netif_start_subqueue(vinfo->netdev, q_idx);
}
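
/*
 * hfi1_netdev_start_xmit - transmit handler: strip the metadata header,
 * compute tail padding, build the bypass PBC and hand the packet to the
 * VNIC SDMA send path.
 */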
static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
					  struct net_device *netdev)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
	u8 pad_len, q_idx = skb->queue_mapping;
	struct hfi1_devdata *dd = vinfo->dd;
	struct opa_vnic_skb_mdata *mdata;
	u32 pkt_len, total_len;
	int err = -EINVAL;
	u64 pbc;

	v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len);
	if (unlikely(!netif_oper_up(netdev))) {
		vinfo->stats[q_idx].tx_drop_state++;
		goto tx_finish;
	}

	/* take out meta data */
	mdata = (struct opa_vnic_skb_mdata *)skb->data;
	skb_pull(skb, sizeof(*mdata));
	if (unlikely(mdata->flags & OPA_VNIC_SKB_MDATA_ENCAP_ERR)) {
		vinfo->stats[q_idx].tx_dlid_zero++;
		goto tx_finish;
	}

	/* add tail padding (for 8 bytes size alignment) and icrc */
	pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
	pad_len += OPA_VNIC_ICRC_TAIL_LEN;

	/*
	 * pkt_len is how much data we have to write, includes header and data.
	 * total_len is length of the packet in Dwords plus the PBC should not
	 * include the CRC.
	 */
	pkt_len = (skb->len + pad_len) >> 2;
	total_len = pkt_len + 2; /* PBC + packet */

	pbc = create_bypass_pbc(mdata->vl, total_len);

	v_dbg("pbc 0x%016llX len %d pad_len %d\n", pbc, skb->len, pad_len);
	err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len);
	if (unlikely(err)) {
		if (err == -ENOMEM)
			vinfo->stats[q_idx].netstats.tx_fifo_errors++;
		else if (err != -EBUSY)
			vinfo->stats[q_idx].netstats.tx_carrier_errors++;
	}

	/* remove the header before updating tx counters */
	skb_pull(skb, OPA_VNIC_HDR_LEN);

	if (unlikely(err == -EBUSY)) {
		hfi1_vnic_maybe_stop_tx(vinfo, q_idx);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_BUSY;
	}

tx_finish:
	/* update tx counters */
	hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
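
/*
 * hfi1_vnic_select_queue - map a packet to the tx queue of the SDMA
 * engine selected for its VL and entropy.
 */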
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
				  struct sk_buff *skb,
				  struct net_device *sb_dev)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
	struct opa_vnic_skb_mdata *mdata;
	struct sdma_engine *sde;

	mdata = (struct opa_vnic_skb_mdata *)skb->data;
	sde = sdma_select_engine_vl(vinfo->dd, mdata->entropy, mdata->vl);
	return sde->this_idx;
}
/* hfi1_vnic_decap_skb - strip OPA header from the skb (ethernet) packet */
static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
				      struct sk_buff *skb)
{
	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
	int max_len = vinfo->netdev->mtu + VLAN_ETH_HLEN;
	int rc = -EFAULT;

	skb_pull(skb, OPA_VNIC_HDR_LEN);

	/* Validate Packet length */
	if (unlikely(skb->len > max_len))
		vinfo->stats[rxq->idx].rx_oversize++;
	else if (unlikely(skb->len < ETH_ZLEN))
		vinfo->stats[rxq->idx].rx_runt++;
	else
		rc = 0;
	return rc;
}
static inline struct sk_buff *hfi1_vnic_get_skb(struct hfi1_vnic_rx_queue *rxq)
{
	unsigned char *pad_info;
	struct sk_buff *skb;

	skb = skb_dequeue(&rxq->skbq);
	if (unlikely(!skb))
		return NULL;

	/* remove tail padding and icrc */
	pad_info = skb->data + skb->len - 1;
	skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
		       ((*pad_info) & 0x7)));

	return skb;
}
/* hfi1_vnic_handle_rx - handle skb receive */
static void hfi1_vnic_handle_rx(struct hfi1_vnic_rx_queue *rxq,
				int *work_done, int work_to_do)
{
	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
	struct sk_buff *skb;
	int rc;

	while (1) {
		if (*work_done >= work_to_do)
			break;

		skb = hfi1_vnic_get_skb(rxq);
		if (unlikely(!skb))
			break;

		rc = hfi1_vnic_decap_skb(rxq, skb);
		/* update rx counters */
		hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
		if (unlikely(rc)) {
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);
		(*work_done)++;
	}
}
/* hfi1_vnic_napi - napi receive polling callback function */
static int hfi1_vnic_napi(struct napi_struct *napi, int budget)
{
	struct hfi1_vnic_rx_queue *rxq = container_of(napi,
					      struct hfi1_vnic_rx_queue, napi);
	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
	int work_done = 0;

	v_dbg("napi %d budget %d\n", rxq->idx, budget);
	hfi1_vnic_handle_rx(rxq, &work_done, budget);

	v_dbg("napi %d work_done %d\n", rxq->idx, work_done);
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}
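
/*
 * hfi1_vnic_bypass_rcv - receive handler for 16B bypass packets: look up
 * the vport by virtual Ethernet switch id, queue the packet on the
 * context's rx queue and schedule NAPI.
 */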
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;
	struct hfi1_vnic_vport_info *vinfo = NULL;
	struct hfi1_vnic_rx_queue *rxq;
	struct sk_buff *skb;
	int l4_type, vesw_id = -1;
	u8 q_idx;

	l4_type = hfi1_16B_get_l4(packet->ebuf);
	if (likely(l4_type == OPA_16B_L4_ETHR)) {
		vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
		vinfo = xa_load(&dd->vnic.vesws, vesw_id);

		/*
		 * In case of invalid vesw id, count the error on
		 * the first available vport.
		 */
		if (unlikely(!vinfo)) {
			struct hfi1_vnic_vport_info *vinfo_tmp;
			unsigned long index = 0;

			vinfo_tmp = xa_find(&dd->vnic.vesws, &index, ULONG_MAX,
					    XA_PRESENT);
			if (vinfo_tmp) {
				spin_lock(&vport_cntr_lock);
				vinfo_tmp->stats[0].netstats.rx_nohandler++;
				spin_unlock(&vport_cntr_lock);
			}
		}
	}

	if (unlikely(!vinfo)) {
		dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n",
			    l4_type, vesw_id, packet->rcd->ctxt);
		return;
	}

	q_idx = packet->rcd->vnic_q_idx;
	rxq = &vinfo->rxq[q_idx];
	if (unlikely(!netif_oper_up(vinfo->netdev))) {
		vinfo->stats[q_idx].rx_drop_state++;
		skb_queue_purge(&rxq->skbq);
		return;
	}

	if (unlikely(skb_queue_len(&rxq->skbq) > HFI1_VNIC_RCV_Q_SIZE)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
	if (unlikely(!skb)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	memcpy(skb->data, packet->ebuf, packet->tlen);
	skb_put(skb, packet->tlen);
	skb_queue_tail(&rxq->skbq, skb);

	if (napi_schedule_prep(&rxq->napi)) {
		v_dbg("napi %d scheduling\n", q_idx);
		__napi_schedule(&rxq->napi);
	}
}
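
/*
 * hfi1_vnic_up - bring a vport up: register its vesw id, enable NAPI on
 * the rx queues and start the tx queues.
 */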
static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo)
{
	struct hfi1_devdata *dd = vinfo->dd;
	struct net_device *netdev = vinfo->netdev;
	int i, rc;

	/* ensure virtual eth switch id is valid */
	if (!vinfo->vesw_id)
		return -EINVAL;

	rc = xa_insert(&dd->vnic.vesws, vinfo->vesw_id, vinfo, GFP_KERNEL);
	if (rc)
		return rc;

	for (i = 0; i < vinfo->num_rx_q; i++) {
		struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];

		skb_queue_head_init(&rxq->skbq);
		napi_enable(&rxq->napi);
	}

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);
	set_bit(HFI1_VNIC_UP, &vinfo->flags);

	return 0;
}
static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
{
	struct hfi1_devdata *dd = vinfo->dd;
	u8 i;

	clear_bit(HFI1_VNIC_UP, &vinfo->flags);
	netif_carrier_off(vinfo->netdev);
	netif_tx_disable(vinfo->netdev);
	xa_erase(&dd->vnic.vesws, vinfo->vesw_id);

	/* ensure irqs see the change */
	msix_vnic_synchronize_irq(dd);

	/* remove unread skbs */
	for (i = 0; i < vinfo->num_rx_q; i++) {
		struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];

		napi_disable(&rxq->napi);
		skb_queue_purge(&rxq->skbq);
	}
}
static int hfi1_netdev_open(struct net_device *netdev)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
	int rc;

	mutex_lock(&vinfo->lock);
	rc = hfi1_vnic_up(vinfo);
	mutex_unlock(&vinfo->lock);
	return rc;
}

static int hfi1_netdev_close(struct net_device *netdev)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

	mutex_lock(&vinfo->lock);
	if (test_bit(HFI1_VNIC_UP, &vinfo->flags))
		hfi1_vnic_down(vinfo);
	mutex_unlock(&vinfo->lock);
	return 0;
}
static int hfi1_vnic_allot_ctxt(struct hfi1_devdata *dd,
				struct hfi1_ctxtdata **vnic_ctxt)
{
	int rc;

	rc = allocate_vnic_ctxt(dd, vnic_ctxt);
	if (rc) {
		dd_dev_err(dd, "vnic ctxt alloc failed %d\n", rc);
		return rc;
	}

	rc = setup_vnic_ctxt(dd, *vnic_ctxt);
	if (rc) {
		dd_dev_err(dd, "vnic ctxt setup failed %d\n", rc);
		deallocate_vnic_ctxt(dd, *vnic_ctxt);
		*vnic_ctxt = NULL;
	}

	return rc;
}
static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
{
	struct hfi1_devdata *dd = vinfo->dd;
	int i, rc = 0;

	mutex_lock(&hfi1_mutex);
	if (!dd->vnic.num_vports) {
		rc = hfi1_vnic_txreq_init(dd);
		if (rc)
			goto txreq_fail;
	}

	for (i = dd->vnic.num_ctxt; i < vinfo->num_rx_q; i++) {
		rc = hfi1_vnic_allot_ctxt(dd, &dd->vnic.ctxt[i]);
		if (rc)
			break;
		hfi1_rcd_get(dd->vnic.ctxt[i]);
		dd->vnic.ctxt[i]->vnic_q_idx = i;
	}

	if (i < vinfo->num_rx_q) {
		/*
		 * If required amount of contexts is not
		 * allocated successfully then remaining contexts
		 * are released.
		 */
		while (i-- > dd->vnic.num_ctxt) {
			deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
			hfi1_rcd_put(dd->vnic.ctxt[i]);
			dd->vnic.ctxt[i] = NULL;
		}
		goto alloc_fail;
	}

	if (dd->vnic.num_ctxt != i) {
		dd->vnic.num_ctxt = i;
		hfi1_init_vnic_rsm(dd);
	}

	dd->vnic.num_vports++;
	hfi1_vnic_sdma_init(vinfo);

alloc_fail:
	if (!dd->vnic.num_vports)
		hfi1_vnic_txreq_deinit(dd);
txreq_fail:
	mutex_unlock(&hfi1_mutex);
	return rc;
}
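
/*
 * hfi1_vnic_deinit - release device-wide VNIC resources when the last
 * vport is destroyed.
 */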
static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo)
{
	struct hfi1_devdata *dd = vinfo->dd;
	int i;

	mutex_lock(&hfi1_mutex);
	if (--dd->vnic.num_vports == 0) {
		for (i = 0; i < dd->vnic.num_ctxt; i++) {
			deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
			hfi1_rcd_put(dd->vnic.ctxt[i]);
			dd->vnic.ctxt[i] = NULL;
		}
		hfi1_deinit_vnic_rsm(dd);
		dd->vnic.num_ctxt = 0;
		hfi1_vnic_txreq_deinit(dd);
	}
	mutex_unlock(&hfi1_mutex);
}
static void hfi1_vnic_set_vesw_id(struct net_device *netdev, int id)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
	bool reopen = false;

	/*
	 * If vesw_id is being changed, and if the vnic port is up,
	 * reset the vnic port to ensure new vesw_id gets picked up
	 */
	if (id != vinfo->vesw_id) {
		mutex_lock(&vinfo->lock);
		if (test_bit(HFI1_VNIC_UP, &vinfo->flags)) {
			hfi1_vnic_down(vinfo);
			reopen = true;
		}

		vinfo->vesw_id = id;
		if (reopen)
			hfi1_vnic_up(vinfo);

		mutex_unlock(&vinfo->lock);
	}
}
static const struct net_device_ops hfi1_netdev_ops = {
	.ndo_open = hfi1_netdev_open,
	.ndo_stop = hfi1_netdev_close,
	.ndo_start_xmit = hfi1_netdev_start_xmit,
	.ndo_select_queue = hfi1_vnic_select_queue,
	.ndo_get_stats64 = hfi1_vnic_get_stats64,
};
static void hfi1_vnic_free_rn(struct net_device *netdev)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

	hfi1_vnic_deinit(vinfo);
	mutex_destroy(&vinfo->lock);
	free_netdev(netdev);
}
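
/*
 * hfi1_vnic_alloc_rn - allocate and initialize an OPA VNIC rdma_netdev
 * with one tx queue per SDMA engine and one rx queue per VNIC receive
 * context.
 */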
struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
				      u32 port_num,
				      enum rdma_netdev_t type,
				      const char *name,
				      unsigned char name_assign_type,
				      void (*setup)(struct net_device *))
{
	struct hfi1_devdata *dd = dd_from_ibdev(device);
	struct hfi1_vnic_vport_info *vinfo;
	struct net_device *netdev;
	struct rdma_netdev *rn;
	int i, size, rc;

	if (!dd->num_vnic_contexts)
		return ERR_PTR(-ENOMEM);

	if (!port_num || (port_num > dd->num_pports))
		return ERR_PTR(-EINVAL);

	if (type != RDMA_NETDEV_OPA_VNIC)
		return ERR_PTR(-EOPNOTSUPP);

	size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
	netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
				  dd->num_sdma, dd->num_vnic_contexts);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	rn = netdev_priv(netdev);
	vinfo = opa_vnic_dev_priv(netdev);
	vinfo->dd = dd;
	vinfo->num_tx_q = dd->num_sdma;
	vinfo->num_rx_q = dd->num_vnic_contexts;
	vinfo->netdev = netdev;
	rn->free_rdma_netdev = hfi1_vnic_free_rn;
	rn->set_id = hfi1_vnic_set_vesw_id;

	netdev->features = NETIF_F_HIGHDMA | NETIF_F_SG;
	netdev->hw_features = netdev->features;
	netdev->vlan_features = netdev->features;
	netdev->watchdog_timeo = msecs_to_jiffies(HFI_TX_TIMEOUT_MS);
	netdev->netdev_ops = &hfi1_netdev_ops;
	mutex_init(&vinfo->lock);

	for (i = 0; i < vinfo->num_rx_q; i++) {
		struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];

		rxq->idx = i;
		rxq->vinfo = vinfo;
		rxq->netdev = netdev;
		netif_napi_add(netdev, &rxq->napi, hfi1_vnic_napi, 64);
	}

	rc = hfi1_vnic_init(vinfo);
	if (rc)
		goto init_fail;

	return netdev;
init_fail:
	mutex_destroy(&vinfo->lock);