/*
 * Copyright(c) 2017 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/*
 * This file contains HFI1 support for VNIC functionality
 */

#include <linux/if_vlan.h>

#include "vnic.h"
#include "netdev.h"
#define HFI_TX_TIMEOUT_MS 1000

#define HFI1_VNIC_RCV_Q_SIZE 1024

#define HFI1_VNIC_UP 0
static DEFINE_SPINLOCK(vport_cntr_lock);
#define SUM_GRP_COUNTERS(stats, qstats, x_grp) do {            \
                u64 *src64, *dst64;                            \
                for (src64 = &qstats->x_grp.unicast,           \
                        dst64 = &stats->x_grp.unicast;         \
                        dst64 <= &stats->x_grp.s_1519_max;) {  \
                        *dst64++ += *src64++;                  \
                }                                              \
        } while (0)
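/*
 * Note: SUM_GRP_COUNTERS() walks the u64 counters of struct
 * opa_vnic_grp_stats with a single pointer sweep, so it assumes the
 * fields from 'unicast' through 's_1519_max' are laid out contiguously.
 */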
#define VNIC_MASK (0xFF)
#define VNIC_ID(val) ((1ull << 24) | ((val) & VNIC_MASK))
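/*
 * VNIC_ID() folds the 8-bit virtual Ethernet switch (vesw) id into the id
 * used with the hfi1_netdev_*_data() table (bit 24 set plus the masked
 * vesw id), presumably to keep VNIC entries distinct from other users of
 * that table.
 */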
/* hfi1_vnic_update_stats - update statistics */
static void hfi1_vnic_update_stats(struct hfi1_vnic_vport_info *vinfo,
                                   struct opa_vnic_stats *stats)
{
        struct net_device *netdev = vinfo->netdev;
        u8 i;

        /* add tx counters on different queues */
        for (i = 0; i < vinfo->num_tx_q; i++) {
                struct opa_vnic_stats *qstats = &vinfo->stats[i];
                struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats;

                stats->netstats.tx_fifo_errors += qnstats->tx_fifo_errors;
                stats->netstats.tx_carrier_errors += qnstats->tx_carrier_errors;
                stats->tx_drop_state += qstats->tx_drop_state;
                stats->tx_dlid_zero += qstats->tx_dlid_zero;

                SUM_GRP_COUNTERS(stats, qstats, tx_grp);
                stats->netstats.tx_packets += qnstats->tx_packets;
                stats->netstats.tx_bytes += qnstats->tx_bytes;
        }

        /* add rx counters on different queues */
        for (i = 0; i < vinfo->num_rx_q; i++) {
                struct opa_vnic_stats *qstats = &vinfo->stats[i];
                struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats;

                stats->netstats.rx_fifo_errors += qnstats->rx_fifo_errors;
                stats->netstats.rx_nohandler += qnstats->rx_nohandler;
                stats->rx_drop_state += qstats->rx_drop_state;
                stats->rx_oversize += qstats->rx_oversize;
                stats->rx_runt += qstats->rx_runt;

                SUM_GRP_COUNTERS(stats, qstats, rx_grp);
                stats->netstats.rx_packets += qnstats->rx_packets;
                stats->netstats.rx_bytes += qnstats->rx_bytes;
        }

        stats->netstats.tx_errors = stats->netstats.tx_fifo_errors +
                                    stats->netstats.tx_carrier_errors +
                                    stats->tx_drop_state + stats->tx_dlid_zero;
        stats->netstats.tx_dropped = stats->netstats.tx_errors;

        stats->netstats.rx_errors = stats->netstats.rx_fifo_errors +
                                    stats->netstats.rx_nohandler +
                                    stats->rx_drop_state + stats->rx_oversize +
                                    stats->rx_runt;
        stats->netstats.rx_dropped = stats->netstats.rx_errors;

        netdev->stats.tx_packets = stats->netstats.tx_packets;
        netdev->stats.tx_bytes = stats->netstats.tx_bytes;
        netdev->stats.tx_fifo_errors = stats->netstats.tx_fifo_errors;
        netdev->stats.tx_carrier_errors = stats->netstats.tx_carrier_errors;
        netdev->stats.tx_errors = stats->netstats.tx_errors;
        netdev->stats.tx_dropped = stats->netstats.tx_dropped;

        netdev->stats.rx_packets = stats->netstats.rx_packets;
        netdev->stats.rx_bytes = stats->netstats.rx_bytes;
        netdev->stats.rx_fifo_errors = stats->netstats.rx_fifo_errors;
        netdev->stats.multicast = stats->rx_grp.mcastbcast;
        netdev->stats.rx_length_errors = stats->rx_oversize + stats->rx_runt;
        netdev->stats.rx_errors = stats->netstats.rx_errors;
        netdev->stats.rx_dropped = stats->netstats.rx_dropped;
}
/* update_len_counters - update pkt's len histogram counters */
static inline void update_len_counters(struct opa_vnic_grp_stats *grp,
                                       int len)
{
        /* account for 4 byte FCS */
        if (len >= 1515)
                grp->s_1519_max++;
        else if (len >= 1020)
                grp->s_1024_1518++;
        else if (len >= 508)
                grp->s_512_1023++;
        else if (len >= 252)
                grp->s_256_511++;
        else if (len >= 124)
                grp->s_128_255++;
        else if (len >= 61)
                grp->s_65_127++;
        else
                grp->s_64++;
}
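/*
 * The bucket thresholds above are the usual Ethernet frame-size boundaries
 * minus the 4-byte FCS that is not present in the skb; for example, a
 * 1020-byte skb corresponds to a 1024-byte frame once the FCS is added.
 */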
/* hfi1_vnic_update_tx_counters - update transmit counters */
static void hfi1_vnic_update_tx_counters(struct hfi1_vnic_vport_info *vinfo,
                                         u8 q_idx, struct sk_buff *skb, int err)
{
        struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
        struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
        struct opa_vnic_grp_stats *tx_grp = &stats->tx_grp;
        u16 vlan_tci;

        stats->netstats.tx_packets++;
        stats->netstats.tx_bytes += skb->len + ETH_FCS_LEN;

        update_len_counters(tx_grp, skb->len);

        /* rest of the counts are for good packets only */
        if (unlikely(err))
                return;

        if (is_multicast_ether_addr(mac_hdr->h_dest))
                tx_grp->mcastbcast++;
        else
                tx_grp->unicast++;

        if (!__vlan_get_tag(skb, &vlan_tci))
                tx_grp->vlan++;
        else
                tx_grp->untagged++;
}
/* hfi1_vnic_update_rx_counters - update receive counters */
static void hfi1_vnic_update_rx_counters(struct hfi1_vnic_vport_info *vinfo,
                                         u8 q_idx, struct sk_buff *skb, int err)
{
        struct ethhdr *mac_hdr = (struct ethhdr *)skb->data;
        struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
        struct opa_vnic_grp_stats *rx_grp = &stats->rx_grp;
        u16 vlan_tci;

        stats->netstats.rx_packets++;
        stats->netstats.rx_bytes += skb->len + ETH_FCS_LEN;

        update_len_counters(rx_grp, skb->len);

        /* rest of the counts are for good packets only */
        if (unlikely(err))
                return;

        if (is_multicast_ether_addr(mac_hdr->h_dest))
                rx_grp->mcastbcast++;
        else
                rx_grp->unicast++;

        if (!__vlan_get_tag(skb, &vlan_tci))
                rx_grp->vlan++;
        else
                rx_grp->untagged++;
}
/* This function is overloaded for opa_vnic specific implementation */
static void hfi1_vnic_get_stats64(struct net_device *netdev,
                                  struct rtnl_link_stats64 *stats)
{
        struct opa_vnic_stats *vstats = (struct opa_vnic_stats *)stats;
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

        hfi1_vnic_update_stats(vinfo, vstats);
}
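/*
 * Judging by the PBC_* flag names used below, the PBC word built here
 * requests no HCRC insertion, marks the packet as a bypass (16B) packet for
 * ICRC handling, asks for a credit return, and encodes the VL and the total
 * length in dwords.
 */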
static u64 create_bypass_pbc(u32 vl, u32 dw_len)
{
        u64 pbc;

        pbc = ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
                | PBC_INSERT_BYPASS_ICRC | PBC_CREDIT_RETURN
                | PBC_PACKET_BYPASS
                | ((vl & PBC_VL_MASK) << PBC_VL_SHIFT)
                | (dw_len & PBC_LENGTH_DWS_MASK) << PBC_LENGTH_DWS_SHIFT;

        return pbc;
}
/* hfi1_vnic_maybe_stop_tx - stop tx queue if required */
static void hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo,
                                    u8 q_idx)
{
        netif_stop_subqueue(vinfo->netdev, q_idx);
        if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
                return;

        netif_start_subqueue(vinfo->netdev, q_idx);
}
static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
                                          struct net_device *netdev)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
        u8 pad_len, q_idx = skb->queue_mapping;
        struct hfi1_devdata *dd = vinfo->dd;
        struct opa_vnic_skb_mdata *mdata;
        u32 pkt_len, total_len;
        int err = -EINVAL;
        u64 pbc;

        v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len);
        if (unlikely(!netif_oper_up(netdev))) {
                vinfo->stats[q_idx].tx_drop_state++;
                goto tx_finish;
        }

        /* take out meta data */
        mdata = (struct opa_vnic_skb_mdata *)skb->data;
        skb_pull(skb, sizeof(*mdata));
        if (unlikely(mdata->flags & OPA_VNIC_SKB_MDATA_ENCAP_ERR)) {
                vinfo->stats[q_idx].tx_dlid_zero++;
                goto tx_finish;
        }

        /* add tail padding (for 8 bytes size alignment) and icrc */
        pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
        pad_len += OPA_VNIC_ICRC_TAIL_LEN;
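        /*
         * In the two lines above: if skb->len + OPA_VNIC_ICRC_TAIL_LEN were,
         * for example, 61 bytes, the mask computation yields 3 pad bytes
         * (rounding 61 up to 64, a multiple of 8), and the final increment
         * then adds the ICRC/tail length itself, so pad_len covers both the
         * alignment padding and the trailer.
         */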
        /*
         * pkt_len is how much data we have to write, including header and data.
         * total_len is the length of the packet in dwords plus the PBC, and
         * should not include the ICRC.
         */
        pkt_len = (skb->len + pad_len) >> 2;
        total_len = pkt_len + 2; /* PBC + packet */

        pbc = create_bypass_pbc(mdata->vl, total_len);

        skb_get(skb);
        v_dbg("pbc 0x%016llX len %d pad_len %d\n", pbc, skb->len, pad_len);
        err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len);
        if (unlikely(err)) {
                if (err == -ENOMEM)
                        vinfo->stats[q_idx].netstats.tx_fifo_errors++;
                else if (err != -EBUSY)
                        vinfo->stats[q_idx].netstats.tx_carrier_errors++;
        }

        /* remove the header before updating tx counters */
        skb_pull(skb, OPA_VNIC_HDR_LEN);

        if (unlikely(err == -EBUSY)) {
                hfi1_vnic_maybe_stop_tx(vinfo, q_idx);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_BUSY;
        }

tx_finish:
        /* update tx counters */
        hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
                                  struct sk_buff *skb,
                                  struct net_device *sb_dev)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
        struct opa_vnic_skb_mdata *mdata;
        struct sdma_engine *sde;

        mdata = (struct opa_vnic_skb_mdata *)skb->data;
        sde = sdma_select_engine_vl(vinfo->dd, mdata->entropy, mdata->vl);
        return sde->this_idx;
}
/* hfi1_vnic_decap_skb - strip OPA header from the skb (ethernet) packet */
static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
                                      struct sk_buff *skb)
{
        struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
        int max_len = vinfo->netdev->mtu + VLAN_ETH_HLEN;
        int rc = -EFAULT;

        skb_pull(skb, OPA_VNIC_HDR_LEN);

        /* Validate Packet length */
        if (unlikely(skb->len > max_len))
                vinfo->stats[rxq->idx].rx_oversize++;
        else if (unlikely(skb->len < ETH_ZLEN))
                vinfo->stats[rxq->idx].rx_runt++;
        else
                rc = 0;
        return rc;
}
static struct hfi1_vnic_vport_info *get_vnic_port(struct hfi1_devdata *dd,
                                                  int vesw_id)
{
        int vnic_id = VNIC_ID(vesw_id);

        return hfi1_netdev_get_data(dd, vnic_id);
}

static struct hfi1_vnic_vport_info *get_first_vnic_port(struct hfi1_devdata *dd)
{
        struct hfi1_vnic_vport_info *vinfo;
        int next_id = VNIC_ID(0);

        vinfo = hfi1_netdev_get_first_data(dd, &next_id);

        if (next_id > VNIC_ID(VNIC_MASK))
                return NULL;

        return vinfo;
}
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
{
        struct hfi1_devdata *dd = packet->rcd->dd;
        struct hfi1_vnic_vport_info *vinfo = NULL;
        struct hfi1_vnic_rx_queue *rxq;
        struct sk_buff *skb;
        int l4_type, vesw_id = -1, rc;
        u8 q_idx;
        unsigned char *pad_info;

        l4_type = hfi1_16B_get_l4(packet->ebuf);
        if (likely(l4_type == OPA_16B_L4_ETHR)) {
                vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
                vinfo = get_vnic_port(dd, vesw_id);

                /*
                 * In case of invalid vesw id, count the error on
                 * the first available vport.
                 */
                if (unlikely(!vinfo)) {
                        struct hfi1_vnic_vport_info *vinfo_tmp;

                        vinfo_tmp = get_first_vnic_port(dd);
                        if (vinfo_tmp) {
                                spin_lock(&vport_cntr_lock);
                                vinfo_tmp->stats[0].netstats.rx_nohandler++;
                                spin_unlock(&vport_cntr_lock);
                        }
                }
        }

        if (unlikely(!vinfo)) {
                dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n",
                            l4_type, vesw_id, packet->rcd->ctxt);
                return;
        }

        q_idx = packet->rcd->vnic_q_idx;
        rxq = &vinfo->rxq[q_idx];
        if (unlikely(!netif_oper_up(vinfo->netdev))) {
                vinfo->stats[q_idx].rx_drop_state++;
                return;
        }

        skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
        if (unlikely(!skb)) {
                vinfo->stats[q_idx].netstats.rx_fifo_errors++;
                return;
        }

        memcpy(skb->data, packet->ebuf, packet->tlen);
        skb_put(skb, packet->tlen);

        pad_info = skb->data + skb->len - 1;
        skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
                       ((*pad_info) & 0x7)));

        rc = hfi1_vnic_decap_skb(rxq, skb);

        /* update rx counters */
        hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
        if (unlikely(rc)) {
                dev_kfree_skb_any(skb);
                return;
        }

        skb_checksum_none_assert(skb);
        skb->protocol = eth_type_trans(skb, rxq->netdev);

        napi_gro_receive(&rxq->napi, skb);
}
static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo)
{
        struct hfi1_devdata *dd = vinfo->dd;
        struct net_device *netdev = vinfo->netdev;
        int rc;

        /* ensure virtual eth switch id is valid */
        if (!vinfo->vesw_id)
                return -EINVAL;

        rc = hfi1_netdev_add_data(dd, VNIC_ID(vinfo->vesw_id), vinfo);
        if (rc < 0)
                return rc;

        rc = hfi1_netdev_rx_init(dd);
        if (rc)
                goto err_remove;

        netif_carrier_on(netdev);
        netif_tx_start_all_queues(netdev);
        set_bit(HFI1_VNIC_UP, &vinfo->flags);

        return 0;

err_remove:
        hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
        return rc;
}
static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
{
        struct hfi1_devdata *dd = vinfo->dd;

        clear_bit(HFI1_VNIC_UP, &vinfo->flags);
        netif_carrier_off(vinfo->netdev);
        netif_tx_disable(vinfo->netdev);
        hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));

        hfi1_netdev_rx_destroy(dd);
}
static int hfi1_netdev_open(struct net_device *netdev)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
        int rc;

        mutex_lock(&vinfo->lock);
        rc = hfi1_vnic_up(vinfo);
        mutex_unlock(&vinfo->lock);
        return rc;
}
static int hfi1_netdev_close(struct net_device *netdev)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

        mutex_lock(&vinfo->lock);
        if (test_bit(HFI1_VNIC_UP, &vinfo->flags))
                hfi1_vnic_down(vinfo);
        mutex_unlock(&vinfo->lock);
        return 0;
}
static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
{
        struct hfi1_devdata *dd = vinfo->dd;
        int rc = 0;

        mutex_lock(&hfi1_mutex);
        if (!dd->vnic_num_vports) {
                rc = hfi1_vnic_txreq_init(dd);
                if (rc)
                        goto txreq_fail;
        }

        rc = hfi1_netdev_rx_init(dd);
        if (rc) {
                dd_dev_err(dd, "Unable to initialize netdev contexts\n");
                goto alloc_fail;
        }

        hfi1_init_vnic_rsm(dd);

        dd->vnic_num_vports++;
        hfi1_vnic_sdma_init(vinfo);

alloc_fail:
        if (!dd->vnic_num_vports)
                hfi1_vnic_txreq_deinit(dd);
txreq_fail:
        mutex_unlock(&hfi1_mutex);
        return rc;
}
static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo)
{
        struct hfi1_devdata *dd = vinfo->dd;

        mutex_lock(&hfi1_mutex);
        if (--dd->vnic_num_vports == 0) {
                hfi1_deinit_vnic_rsm(dd);
                hfi1_vnic_txreq_deinit(dd);
        }
        mutex_unlock(&hfi1_mutex);
        hfi1_netdev_rx_destroy(dd);
}
static void hfi1_vnic_set_vesw_id(struct net_device *netdev, int id)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

        /*
         * If vesw_id is being changed, and if the vnic port is up,
         * reset the vnic port to ensure new vesw_id gets picked up
         */
        if (id != vinfo->vesw_id) {
                mutex_lock(&vinfo->lock);
                if (test_bit(HFI1_VNIC_UP, &vinfo->flags)) {
                        hfi1_vnic_down(vinfo);
                        vinfo->vesw_id = id;
                        hfi1_vnic_up(vinfo);
                } else {
                        vinfo->vesw_id = id;
                }
                mutex_unlock(&vinfo->lock);
        }
}
static const struct net_device_ops hfi1_netdev_ops = {
        .ndo_open = hfi1_netdev_open,
        .ndo_stop = hfi1_netdev_close,
        .ndo_start_xmit = hfi1_netdev_start_xmit,
        .ndo_select_queue = hfi1_vnic_select_queue,
        .ndo_get_stats64 = hfi1_vnic_get_stats64,
};
static void hfi1_vnic_free_rn(struct net_device *netdev)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

        hfi1_vnic_deinit(vinfo);
        mutex_destroy(&vinfo->lock);
        free_netdev(netdev);
}
struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
                                      u8 port_num,
                                      enum rdma_netdev_t type,
                                      const char *name,
                                      unsigned char name_assign_type,
                                      void (*setup)(struct net_device *))
{
        struct hfi1_devdata *dd = dd_from_ibdev(device);
        struct hfi1_vnic_vport_info *vinfo;
        struct net_device *netdev;
        struct rdma_netdev *rn;
        int i, size, rc;

        if (!dd->num_netdev_contexts)
                return ERR_PTR(-ENOMEM);

        if (!port_num || (port_num > dd->num_pports))
                return ERR_PTR(-EINVAL);

        if (type != RDMA_NETDEV_OPA_VNIC)
                return ERR_PTR(-EOPNOTSUPP);

        size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
        netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
                                  chip_sdma_engines(dd),
                                  dd->num_netdev_contexts);
        if (!netdev)
                return ERR_PTR(-ENOMEM);

        rn = netdev_priv(netdev);
        vinfo = opa_vnic_dev_priv(netdev);
        vinfo->dd = dd;
        vinfo->num_tx_q = chip_sdma_engines(dd);
        vinfo->num_rx_q = dd->num_netdev_contexts;
        vinfo->netdev = netdev;
        rn->free_rdma_netdev = hfi1_vnic_free_rn;
        rn->set_id = hfi1_vnic_set_vesw_id;

        netdev->features = NETIF_F_HIGHDMA | NETIF_F_SG;
        netdev->hw_features = netdev->features;
        netdev->vlan_features = netdev->features;
        netdev->watchdog_timeo = msecs_to_jiffies(HFI_TX_TIMEOUT_MS);
        netdev->netdev_ops = &hfi1_netdev_ops;
        mutex_init(&vinfo->lock);

        for (i = 0; i < vinfo->num_rx_q; i++) {
                struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];

                rxq->idx = i;
                rxq->vinfo = vinfo;
                rxq->netdev = netdev;
        }

        rc = hfi1_vnic_init(vinfo);
        if (rc)
                goto init_fail;

        return netdev;
init_fail:
        mutex_destroy(&vinfo->lock);
        free_netdev(netdev);
        return ERR_PTR(rc);
}