/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
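/*
 * Note (added explanation): each interrupt has its own IMR register laid
 * out at an 8-byte stride from VMXNET3_REG_IMR; writing 0 unmasks the
 * vector and writing 1 masks it, which is what the two helpers above do.
 */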
/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}
static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}
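/*
 * Note (added explanation): in the GET_LINK result used above, bit 0
 * reports link-up and the upper 16 bits carry the link speed in Mbps,
 * which is why the code stores "ret >> 16" in adapter->link_speed.
 */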
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}
#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read from and write into the ABI.
 * The general technique used here is: double-word bitfields are defined in
 * the opposite order for big endian architectures. Before reading them in the
 * driver, the complete double word is translated using le32_to_cpu. Similarly,
 * after the driver writes into bitfields, cpu_to_le32 is used to translate the
 * double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;

	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;

	temp &= mask;
	temp >>= pos;
	return temp;
}


#endif  /* __BIG_ENDIAN_BITFIELD */
#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD  */
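/*
 * Note (added explanation): on little-endian builds the accessors above
 * collapse to plain bitfield reads, and the vmxnet3_getRx{Comp,Desc}
 * helpers simply alias the shared descriptor; no temporary copy or byte
 * swapping is required.
 */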
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}
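/*
 * Note (added explanation): vmxnet3_unmap_pkt() returns the number of tx
 * ring entries it released for the completed packet; the completion path
 * below accumulates that count before deciding whether a stopped queue
 * can be woken.
 */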
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}
static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
				  sizeof(struct Vmxnet3_TxDesc),
				  tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
				  sizeof(struct Vmxnet3_TxDataDesc),
				  tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
				  sizeof(struct Vmxnet3_TxCompDesc),
				  tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	if (tq->buf_info) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
				  tq->buf_info, tq->buf_info_pa);
		tq->buf_info = NULL;
	}
}
/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	size_t sz;

	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate data ring\n");
		goto err;
	}

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
					   &tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}
static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}
/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = dma_map_single(
						&adapter->pdev->dev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = dma_map_page(
						&adapter->pdev->dev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but dont mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		   "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		   num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
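/*
 * Note (added explanation): ownership of an rx descriptor is handed to
 * the device by setting its gen bit to the ring's current generation.
 * The loop above deliberately leaves the last filled descriptor without
 * the gen bit, so next2fill never catches up with next2comp and the
 * device can tell a full ring from an empty one (see the BUG_ON above).
 */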
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				"txd[%u]: 0x%llu %u %u\n",
				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
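/*
 * Note (added explanation): the SOP descriptor written by
 * vmxnet3_map_pkt() carries the previous generation bit
 * (tx_ring.gen ^ 0x1), so the device ignores the descriptor chain until
 * vmxnet3_tq_xmit() flips that bit as its final step.
 */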
/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}
/*
 *    parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = tcp_hdrlen(skb);
				else if (iph->protocol == IPPROTO_UDP)
					ctx->l4_hdr_size = sizeof(struct udphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}
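/*
 * Note (added explanation): the "+ 1" in txd_estimate() appears to
 * reserve room for the extra SOP descriptor used when protocol headers
 * are copied into the tx data ring, so the estimate errs on the side of
 * caution.
 */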
/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		netdev_dbg(adapter->netdev,
			   "tx queue stopped on %s, next2comp %u"
			   " next2fill %u\n", adapter->netdev->name,
			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	netdev_dbg(adapter->netdev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)(ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}
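/*
 * Note (added explanation): CHECKSUM_UNNECESSARY is claimed only when
 * the completion descriptor reports both checksums as verified
 * (VMXNET3_RCD_CSUM_OK); otherwise any checksum value supplied by the
 * device is stored in skb->csum and marked CHECKSUM_PARTIAL, and in all
 * other cases no checksum status is asserted.
 */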
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_rxd = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring	*ring = NULL;
		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				netdev_dbg(adapter->netdev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;
			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
							    rbi->len);
			if (new_skb == NULL) {
				/* Skb allocation failed, do not handover this
				 * skb to stack. Reuse it. Drop the existing pkt
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
					 rbi->len,
					 PCI_DMA_FROMDEVICE);

#ifdef VMXNET3_RSS
			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
			    (adapter->netdev->features & NETIF_F_RXHASH))
				ctx->skb->rxhash = le32_to_cpu(rcd->rssHash);
#endif
			skb_put(ctx->skb, rcd->len);

			/* Immediate refill */
			rbi->skb = new_skb;
			rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
						       rbi->skb->data, rbi->len,
						       PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;

		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
			 */
			if (skip_page_frags)
				goto rcd_done;

			new_page = alloc_page(GFP_ATOMIC);
			if (unlikely(new_page == NULL)) {
				/* Replacement page frag could not be allocated.
				 * Reuse this page. Drop the pkt and free the
				 * skb which contained this page as a frag. Skip
				 * processing all the following non-sop frags.
				 */
				rq->stats.rx_buf_alloc_failure++;
				dev_kfree_skb(ctx->skb);
				ctx->skb = NULL;
				skip_page_frags = true;
				goto rcd_done;
			}

			dma_unmap_page(&adapter->pdev->dev,
				       rbi->dma_addr, rbi->len,
				       PCI_DMA_FROMDEVICE);

			vmxnet3_append_frag(ctx->skb, rcd, rbi);

			/* Immediate refill */
			rbi->page = new_page;
			rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
						     rbi->page, 0, PAGE_SIZE,
						     PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;
		}


		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);

			if (adapter->netdev->features & NETIF_F_LRO)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
					  &rxCmdDesc);
			BUG_ON(!rxd->addr);

			/* Recv desc is ready to be used by the device */
			rxd->gen = ring->gen;
			vmxnet3_cmd_ring_adv_next2fill(ring);
			num_to_alloc--;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_rxd;
}
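/*
 * Note (added explanation): the receive path above refills each ring
 * slot immediately -- a replacement skb or page is allocated before the
 * old buffer is handed to the stack, and when that allocation fails the
 * packet is dropped and the original buffer recycled, so the rings never
 * lose entries.
 */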
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
					rq->buf_info[ring_idx][i].skb) {
				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
					rq->buf_info[ring_idx][i].page) {
				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}
static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}
static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			       struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			dma_free_coherent(&adapter->pdev->dev,
					  rq->rx_ring[i].size
					  * sizeof(struct Vmxnet3_RxDesc),
					  rq->rx_ring[i].base,
					  rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
				  * sizeof(struct Vmxnet3_RxCompDesc),
				  rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}

	if (rq->buf_info[0]) {
		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
			(rq->rx_ring[0].size + rq->rx_ring[1].size);
		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
				  rq->buf_info_pa);
	}
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;
}
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = dma_alloc_coherent(
						&adapter->pdev->dev, sz,
						&rq->rx_ring[i].basePA,
						GFP_KERNEL);
		if (!rq->rx_ring[i].base) {
			netdev_err(adapter->netdev,
				   "failed to allocate rx ring %d\n", i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
						&rq->comp_ring.basePA,
						GFP_KERNEL);
	if (!rq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
				 GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;
}
/* Multiple queue aware polling function for tx and rx */

static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;

	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}


static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}
/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */

static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}
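/*
 * Note (added explanation): in VMXNET3_INTR_BUDDYSHARE mode rx queue i
 * shares its MSI-X vector with tx queue i, which is why the rx-only poll
 * routine above also reaps tx completions for its buddy queue.
 */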
#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocate for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}
/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */

static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}
/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */

static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI  */
/* Interrupt handler for vmxnet3  */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}


	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}
1757 /* netpoll callback. */
1759 vmxnet3_netpoll(struct net_device
*netdev
)
1761 struct vmxnet3_adapter
*adapter
= netdev_priv(netdev
);
1763 if (adapter
->intr
.mask_mode
== VMXNET3_IMM_ACTIVE
)
1764 vmxnet3_disable_all_intrs(adapter
);
1766 vmxnet3_do_poll(adapter
, adapter
->rx_queue
[0].rx_ring
[0].size
);
1767 vmxnet3_enable_all_intrs(adapter
);
1770 #endif /* CONFIG_NET_POLL_CONTROLLER */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				netdev_err(adapter->netdev,
					   "Failed to request irq for MSIX, "
					   "%s, error %d\n",
					   adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to request irq (intr type:%d), error %d\n",
			   intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}

		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		netdev_info(adapter->netdev,
			    "intr type %u, mode %u, %u vectors allocated\n",
			    intr->type, intr->mask_mode, intr->num_intrs);
	}

	return err;
}
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG();
	}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	u16 vid;

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}
static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}


static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}
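/*
 * Note (added explanation): vmxnet3_copy_mc() returns a kmalloc()ed flat
 * array of ETH_ALEN-byte addresses, or NULL when the list would overflow
 * the 16-bit mfTableLen field; the caller in vmxnet3_set_mc() maps and
 * later frees it.
 */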
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	dma_addr_t new_table_pa = 0;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));

		new_mode |= VMXNET3_RXM_PROMISC;
	} else {
		vmxnet3_restore_vlan(adapter);
	}

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (!netdev_mc_empty(netdev)) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTableLen = cpu_to_le16(
					netdev_mc_count(netdev) * ETH_ALEN);
				new_table_pa = dma_map_single(
							&adapter->pdev->dev,
							new_table,
							rxConf->mfTableLen,
							PCI_DMA_TODEVICE);
				rxConf->mfTablePA = cpu_to_le64(new_table_pa);
			} else {
				netdev_info(netdev, "failed to copy mcast list"
					    ", setting ALL_MULTI\n");
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}


	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (new_table_pa)
		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
	kfree(new_table);
}
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}
2101 * Set up driver_shared based on settings in adapter.
2105 vmxnet3_setup_driver_shared(struct vmxnet3_adapter
*adapter
)
2107 struct Vmxnet3_DriverShared
*shared
= adapter
->shared
;
2108 struct Vmxnet3_DSDevRead
*devRead
= &shared
->devRead
;
2109 struct Vmxnet3_TxQueueConf
*tqc
;
2110 struct Vmxnet3_RxQueueConf
*rqc
;
2113 memset(shared
, 0, sizeof(*shared
));
2115 /* driver settings */
2116 shared
->magic
= cpu_to_le32(VMXNET3_REV1_MAGIC
);
2117 devRead
->misc
.driverInfo
.version
= cpu_to_le32(
2118 VMXNET3_DRIVER_VERSION_NUM
);
2119 devRead
->misc
.driverInfo
.gos
.gosBits
= (sizeof(void *) == 4 ?
2120 VMXNET3_GOS_BITS_32
: VMXNET3_GOS_BITS_64
);
2121 devRead
->misc
.driverInfo
.gos
.gosType
= VMXNET3_GOS_TYPE_LINUX
;
2122 *((u32
*)&devRead
->misc
.driverInfo
.gos
) = cpu_to_le32(
2123 *((u32
*)&devRead
->misc
.driverInfo
.gos
));
2124 devRead
->misc
.driverInfo
.vmxnet3RevSpt
= cpu_to_le32(1);
2125 devRead
->misc
.driverInfo
.uptVerSpt
= cpu_to_le32(1);
2127 devRead
->misc
.ddPA
= cpu_to_le64(adapter
->adapter_pa
);
2128 devRead
->misc
.ddLen
= cpu_to_le32(sizeof(struct vmxnet3_adapter
));
2130 /* set up feature flags */
2131 if (adapter
->netdev
->features
& NETIF_F_RXCSUM
)
2132 devRead
->misc
.uptFeatures
|= UPT1_F_RXCSUM
;
2134 if (adapter
->netdev
->features
& NETIF_F_LRO
) {
2135 devRead
->misc
.uptFeatures
|= UPT1_F_LRO
;
2136 devRead
->misc
.maxNumRxSG
= cpu_to_le16(1 + MAX_SKB_FRAGS
);
2138 if (adapter
->netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
)
2139 devRead
->misc
.uptFeatures
|= UPT1_F_RXVLAN
;
2141 devRead
->misc
.mtu
= cpu_to_le32(adapter
->netdev
->mtu
);
2142 devRead
->misc
.queueDescPA
= cpu_to_le64(adapter
->queue_desc_pa
);
2143 devRead
->misc
.queueDescLen
= cpu_to_le32(
2144 adapter
->num_tx_queues
* sizeof(struct Vmxnet3_TxQueueDesc
) +
2145 adapter
->num_rx_queues
* sizeof(struct Vmxnet3_RxQueueDesc
));
2147 /* tx queue settings */
2148 devRead
->misc
.numTxQueues
= adapter
->num_tx_queues
;
2149 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
2150 struct vmxnet3_tx_queue
*tq
= &adapter
->tx_queue
[i
];
2151 BUG_ON(adapter
->tx_queue
[i
].tx_ring
.base
== NULL
);
2152 tqc
= &adapter
->tqd_start
[i
].conf
;
2153 tqc
->txRingBasePA
= cpu_to_le64(tq
->tx_ring
.basePA
);
2154 tqc
->dataRingBasePA
= cpu_to_le64(tq
->data_ring
.basePA
);
2155 tqc
->compRingBasePA
= cpu_to_le64(tq
->comp_ring
.basePA
);
2156 tqc
->ddPA
= cpu_to_le64(tq
->buf_info_pa
);
2157 tqc
->txRingSize
= cpu_to_le32(tq
->tx_ring
.size
);
2158 tqc
->dataRingSize
= cpu_to_le32(tq
->data_ring
.size
);
2159 tqc
->compRingSize
= cpu_to_le32(tq
->comp_ring
.size
);
2160 tqc
->ddLen
= cpu_to_le32(
2161 sizeof(struct vmxnet3_tx_buf_info
) *
2163 tqc
->intrIdx
= tq
->comp_ring
.intr_idx
;
2166 /* rx queue settings */
2167 devRead
->misc
.numRxQueues
= adapter
->num_rx_queues
;
2168 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
2169 struct vmxnet3_rx_queue
*rq
= &adapter
->rx_queue
[i
];
2170 rqc
= &adapter
->rqd_start
[i
].conf
;
2171 rqc
->rxRingBasePA
[0] = cpu_to_le64(rq
->rx_ring
[0].basePA
);
2172 rqc
->rxRingBasePA
[1] = cpu_to_le64(rq
->rx_ring
[1].basePA
);
2173 rqc
->compRingBasePA
= cpu_to_le64(rq
->comp_ring
.basePA
);
2174 rqc
->ddPA
= cpu_to_le64(rq
->buf_info_pa
);
2175 rqc
->rxRingSize
[0] = cpu_to_le32(rq
->rx_ring
[0].size
);
2176 rqc
->rxRingSize
[1] = cpu_to_le32(rq
->rx_ring
[1].size
);
2177 rqc
->compRingSize
= cpu_to_le32(rq
->comp_ring
.size
);
2178 rqc
->ddLen
= cpu_to_le32(
2179 sizeof(struct vmxnet3_rx_buf_info
) *
2180 (rqc
->rxRingSize
[0] +
2181 rqc
->rxRingSize
[1]));
2182 rqc
->intrIdx
= rq
->comp_ring
.intr_idx
;
#ifdef VMXNET3_RSS
	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));

	if (adapter->rss) {
		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
		static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
			0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
			0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
			0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
			0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
			0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
		};

		devRead->misc.uptFeatures |= UPT1_F_RSS;
		devRead->misc.numRxQueues = adapter->num_rx_queues;
		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
				    UPT1_RSS_HASH_TYPE_IPV4 |
				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
				    UPT1_RSS_HASH_TYPE_IPV6;
		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
		memcpy(rssConf->hashKey, rss_key, sizeof(rss_key));

		for (i = 0; i < rssConf->indTableSize; i++)
			rssConf->indTable[i] = ethtool_rxfh_indir_default(
				i, adapter->num_rx_queues);

		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
		devRead->rssConfDesc.confPA =
			cpu_to_le64(adapter->rss_conf_pa);
	}
#endif /* VMXNET3_RSS */
	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}
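
/*
 * Note: devRead is part of the Vmxnet3_DriverShared area whose physical
 * address vmxnet3_activate_dev() programs into VMXNET3_REG_DSAL/DSAH
 * before issuing VMXNET3_CMD_ACTIVATE_DEV, which is why every field set
 * up above is converted to little-endian, device-visible form here.
 */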
static int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err, i;
	u32 ret;
	unsigned long flags;

	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
		" ring sizes %u %u %u\n", adapter->netdev->name,
		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
		adapter->tx_queue[0].tx_ring.size,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to init rx queue error %d\n", err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to setup irq for error %d\n", err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (ret != 0) {
		netdev_err(adapter->netdev,
			   "Failed to activate dev: error %u\n", ret);
		err = -EINVAL;
		goto activate_err;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup_all(adapter);
	return err;
}
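
/*
 * Note: the VMXNET3_REG_RXPROD/RXPROD2 writes in vmxnet3_activate_dev()
 * publish the initial "next2fill" indices of both rx rings of every
 * queue, telling the device how many receive buffers have already been
 * posted before traffic starts.
 */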
static void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
static int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;

	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
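
/*
 * Note: the 6-byte MAC address is split across two registers: the low
 * four bytes go into VMXNET3_REG_MACL and the remaining two bytes into
 * VMXNET3_REG_MACH. vmxnet3_read_mac_addr() below undoes the same
 * packing.
 */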
static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}
/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to request region for adapter: error %d\n", err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		dev_err(&pdev->dev, "Failed to map bar0\n");
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		dev_err(&pdev->dev, "Failed to map bar1\n");
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}
static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
			   sz * sz);
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rq = &adapter->rx_queue[i];
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
		rq->comp_ring.size = comp_size;
	}
}
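
/*
 * Sizing note (an inference from the arithmetic above, not a statement
 * from this file): when the MTU exceeds VMXNET3_MAX_SKB_BUF_SIZE -
 * VMXNET3_MAX_ETH_HDR_SIZE, the first receive buffer holds
 * VMXNET3_MAX_SKB_BUF_SIZE bytes and the remainder is expected to land
 * in page-sized buffers, so rx_buf_per_pkt is 1 plus the number of
 * pages needed for the overflow; ring 0 is then rounded to a multiple
 * of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN.
 */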
static int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err = 0, i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
		tq->tx_ring.size   = tx_ring_size;
		tq->data_ring.size = tx_ring_size;
		tq->comp_ring.size = tx_ring_size;
		tq->shared = &adapter->tqd_start[i].ctrl;
		tq->stopped = true;
		tq->adapter = adapter;
		tq->qid = i;
		err = vmxnet3_tq_create(tq, adapter);
		/*
		 * Too late to change num_tx_queues. We cannot do away with
		 * lesser number of queues than what we asked for
		 */
		if (err)
			goto queue_err;
	}

	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		/* qid and qid2 for rx queues will be assigned later when num
		 * of rx queues is finalized after allocating intrs */
		rq->shared = &adapter->rqd_start[i].ctrl;
		rq->adapter = adapter;
		err = vmxnet3_rq_create(rq, adapter);
		if (err) {
			if (i == 0) {
				netdev_err(adapter->netdev,
					   "Could not allocate any rx queues. "
					   "Aborting.\n");
				goto queue_err;
			} else {
				netdev_info(adapter->netdev,
					    "Number of rx queues changed "
					    "to : %d.\n", i);
				adapter->num_rx_queues = i;
				err = 0;
				break;
			}
		}
	}
	return err;
queue_err:
	vmxnet3_tq_destroy_all(adapter);
	return err;
}
static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err, i;

	adapter = netdev_priv(netdev);

	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);
queue_err:
	return err;
}
static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	int i;

	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	dev_close(adapter->netdev);
}
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy_all(adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
		if (err) {
			netdev_err(netdev,
				   "failed to re-create rx queues, "
				   " error %d. Closing it.\n", err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			netdev_err(netdev,
				   "failed to re-activate, error %d. "
				   "Closing it\n", err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_LRO;
	if (dma64)
		netdev->hw_features |= NETIF_F_HIGHDMA;
	netdev->vlan_features = netdev->hw_features &
				~(NETIF_F_HW_VLAN_CTAG_TX |
				  NETIF_F_HW_VLAN_CTAG_RX);
	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
}
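
/*
 * Note: hw_features is the set that may be toggled from ethtool while
 * features is what is currently enabled, so adding
 * NETIF_F_HW_VLAN_CTAG_FILTER only to features keeps VLAN filtering
 * permanently on; the VLAN tag offloads are masked out of
 * vlan_features so they are not inherited by stacked VLAN devices.
 */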
static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}
#ifdef CONFIG_PCI_MSI

/*
 * Enable MSIx vectors.
 * Returns :
 *	0 on successful enabling of required vectors,
 *	VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
 *	 could be enabled.
 *	number of vectors which can be enabled otherwise (this number is smaller
 *	 than VMXNET3_LINUX_MIN_MSIX_VECT)
 */

static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
			     int vectors)
{
	int err = 0, vector_threshold;
	vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;

	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      vectors);
		if (!err) {
			adapter->intr.num_intrs = vectors;
			return 0;
		} else if (err < 0) {
			dev_err(&adapter->netdev->dev,
				"Failed to enable MSI-X, error: %d\n", err);
			break;
		} else if (err < vector_threshold) {
			break;
		} else {
			/* If fails to enable required number of MSI-x vectors
			 * try enabling minimum number of vectors required.
			 */
			dev_err(&adapter->netdev->dev,
				"Failed to enable %d MSI-X, trying %d instead\n",
				vectors, vector_threshold);
			vectors = vector_threshold;
		}
	}

	dev_info(&adapter->pdev->dev,
		 "Number of MSI-X interrupts which can be allocated "
		 "is lower than min threshold required.\n");
	return err;
}
#endif /* CONFIG_PCI_MSI */
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;
	unsigned long flags;

	/* intr settings */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int vector, err = 0;

		adapter->intr.num_intrs = (adapter->share_intr ==
					   VMXNET3_INTR_TXSHARE) ? 1 :
					   adapter->num_tx_queues;
		adapter->intr.num_intrs += (adapter->share_intr ==
					   VMXNET3_INTR_BUDDYSHARE) ? 0 :
					   adapter->num_rx_queues;
		adapter->intr.num_intrs += 1;		/* for link event */

		adapter->intr.num_intrs = (adapter->intr.num_intrs >
					   VMXNET3_LINUX_MIN_MSIX_VECT
					   ? adapter->intr.num_intrs :
					   VMXNET3_LINUX_MIN_MSIX_VECT);

		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
			adapter->intr.msix_entries[vector].entry = vector;

		err = vmxnet3_acquire_msix_vectors(adapter,
						   adapter->intr.num_intrs);
		/* If we cannot allocate one MSIx vector per queue
		 * then limit the number of rx queues to 1
		 */
		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				netdev_err(adapter->netdev,
					   "Number of rx queues : 1\n");
				adapter->num_rx_queues = 1;
				adapter->intr.num_intrs =
						VMXNET3_LINUX_MIN_MSIX_VECT;
			}
			return;
		}
		if (!err)
			return;

		/* If we cannot allocate MSIx vectors use only one rx queue */
		dev_info(&adapter->pdev->dev,
			 "Failed to enable MSI-X, error %d. "
			 "Limiting #rx queues to 1, try MSI.\n", err);

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		int err;
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->num_rx_queues = 1;
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->num_rx_queues = 1;
	dev_info(&adapter->netdev->dev,
		 "Using INTx interrupt, #Rx queues: 1.\n");
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}
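
/*
 * Note: interrupt allocation degrades step by step: the type suggested
 * by VMXNET3_CMD_GET_CONF_INTR is tried first, MSI-X falls back to
 * VMXNET3_LINUX_MIN_MSIX_VECT vectors with a single rx queue when the
 * per-queue allocation fails, then plain MSI is attempted, and finally
 * legacy INTx is used with one rx queue and one vector.
 */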
static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}
static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	netdev_err(adapter->netdev, "tx hang\n");
	schedule_work(&adapter->work);
	netif_wake_queue(adapter->netdev);
}
static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		netdev_notice(adapter->netdev, "resetting\n");
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		netdev_info(adapter->netdev, "already closed\n");
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
static int
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_set_features = vmxnet3_set_features,
		.ndo_get_stats64 = vmxnet3_get_stats64,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_rx_mode = vmxnet3_set_mc,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];
	int size;
	int num_tx_queues;
	int num_rx_queues;

	if (!pci_msi_enabled())
		enable_mq = 0;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	if (enable_mq)
		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
				    (int)num_online_cpus());
	else
		num_tx_queues = 1;

	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
				   max(num_tx_queues, num_rx_queues));
	dev_info(&pdev->dev,
		 "# of Tx queues : %d, # of Rx queues : %d\n",
		 num_tx_queues, num_rx_queues);

	if (!netdev)
		return -ENOMEM;
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	spin_lock_init(&adapter->cmd_lock);
	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
					     sizeof(struct vmxnet3_adapter),
					     PCI_DMA_TODEVICE);
	adapter->shared = dma_alloc_coherent(
				&adapter->pdev->dev,
				sizeof(struct Vmxnet3_DriverShared),
				&adapter->shared_pa, GFP_KERNEL);
	if (!adapter->shared) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->num_rx_queues = num_rx_queues;
	adapter->num_tx_queues = num_tx_queues;
	adapter->rx_buf_per_pkt = 1;
	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
						&adapter->queue_desc_pa,
						GFP_KERNEL);

	if (!adapter->tqd_start) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							    adapter->num_tx_queues);

	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
					      sizeof(struct Vmxnet3_PMConf),
					      &adapter->pm_conf_pa,
					      GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
					       sizeof(struct UPT1_RSSConf),
					       &adapter->rss_conf_pa,
					       GFP_KERNEL);
	if (adapter->rss_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */
	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible h/w version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible upt version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	vmxnet3_declare_features(adapter, dma64);

	if (adapter->num_tx_queues == adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
	else
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		netdev->hw_features |= NETIF_F_RXHASH;
		netdev->features |= NETIF_F_RXHASH;
		dev_dbg(&pdev->dev, "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif
	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);
	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);

	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	netif_carrier_off(netdev);
	err = register_netdev(netdev);

	if (err) {
		dev_err(&pdev->dev, "Failed to register adapter\n");
		goto err_register;
	}

	vmxnet3_check_link(adapter, false);
	return 0;
err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
err_alloc_rss:
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);
err_alloc_pm:
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
err_alloc_queue_desc:
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
err_alloc_shared:
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
	free_netdev(netdev);
}
static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);
	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}
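
	/*
	 * Note: in these wake-up filters each bit of mask[] enables one
	 * byte of pattern[], so 0x3F above covers the six MAC-address
	 * bytes, and the ARP filter below sets only the bits for the
	 * EtherType, the ARP opcode and the target IP address.
	 */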
	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +			/* 2 Ethernet addresses*/
			2 * sizeof(u32);		/*2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(adapter->pm_conf_pa);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}
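
/*
 * Note: VMXNET3_CMD_UPDATE_PMCFG makes the device re-read the
 * pmConfDesc descriptor filled in just above, so the wake-up filters
 * are in place before the function drops into its low-power state;
 * vmxnet3_resume() issues the same command after zeroing the
 * configuration to remove them again.
 */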
static int
vmxnet3_resume(struct device *device)
{
	int err, i = 0;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(adapter->pm_conf_pa);

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}
static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= vmxnet3_remove_device,
	.driver.pm	= &vmxnet3_pm_ops,
};
static int __init
vmxnet3_init_module(void)
{
	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
		VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);
static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);