/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
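/* Note: ENIC_DESC_MAX_SPLITS is the worst-case number of WQ descriptors a
 * single MAX_TSO-sized buffer can consume: each descriptor covers at most
 * WQ_ENET_MAX_DESC_LEN bytes, plus one descriptor for any partial remainder.
 */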
#define PCI_DEVICE_ID_CISCO_VIC_ENET	0x0043	/* ethernet vnic */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ 0, }	/* end of table */
};
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};
#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
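/* Note: .offset is expressed in u64 words (hence the divide by 8);
 * enic_get_ethtool_stats() below indexes the firmware stats block as an
 * array of u64 using these offsets.
 */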
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
};
static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ecmd->speed = vnic_dev_port_speed(enic->vdev);
		ecmd->duplex = DUPLEX_FULL;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_fw_info(enic->vdev, &fw_info);
	spin_unlock(&enic->devcmd_lock);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}
static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}
static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &vstats);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}
static u32 enic_get_rx_csum(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->csum_rx_enabled;
}
static int enic_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, RXCSUM))
		return -EINVAL;

	enic->csum_rx_enabled = !!data;

	return 0;
}
static int enic_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TXCSUM))
		return -EINVAL;

	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}
static int enic_set_tso(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TSO))
		return -EINVAL;

	if (data)
		netdev->features |=
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	else
		netdev->features &=
			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);

	return 0;
}
static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}
static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}
static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}
static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	tx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->tx_coalesce_usecs);
	rx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->rx_coalesce_usecs);

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
			INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
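/* Note: requested coalescing times are clamped to the largest value the
 * hardware timer can represent (VNIC_INTR_TIMER_MAX).  When WQ and RQ share
 * a single interrupt (INTx/MSI), tx and rx coalescing must be equal since
 * only one timer is available.
 */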
static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_rx_csum = enic_get_rx_csum,
	.set_rx_csum = enic_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = enic_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = enic_set_tso,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}
static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
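/* Note: the queue is only woken once enough descriptors are free for a
 * worst-case send: up to MAX_SKB_FRAGS fragments plus ENIC_DESC_MAX_SPLITS
 * descriptors for a large linear/TSO buffer.  This mirrors the stop
 * condition in enic_hard_start_xmit().
 */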
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}
}
static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
		netif_carrier_off(enic->netdev);
	}
}
static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (mtu < enic->netdev->mtu)
			printk(KERN_WARNING PFX
				"%s: interface MTU (%d) set higher "
				"than switch port MTU (%d)\n",
				enic->netdev->name, enic->netdev->mtu, mtu);
	}
}
static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
			enic->netdev->name, enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}
static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}
#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
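/* ENIC_TEST_INTR() checks bit i of the legacy interrupt PBA bitmap read in
 * enic_isr_legacy() below; each enic interrupt resource (WQ/RQ, error,
 * notify) owns one bit.
 */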
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (napi_schedule_prep(&enic->napi))
			__napi_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct enic *enic = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0));	/* EOP? */
	}
}
static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}
static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_transport_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	unsigned int offset = 0;
	dma_addr_t dma_addr;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
			len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left));
		frag_len_left -= len;
		offset += len;
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len == frag_len_left));	/* EOP? */
			frag_len_left -= len;
			offset += len;
		}
	}
}
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;

	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
}
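/* Summary of the transmit dispatch above: TSO sends (gso_size set) use the
 * TSO descriptor format, CHECKSUM_PARTIAL sends use the L4 checksum-offload
 * format, and everything else uses the plain (optionally VLAN-tagged)
 * descriptor format.
 */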
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	struct vnic_stats *stats;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &stats);
	spin_unlock(&enic->devcmd_lock);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}
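/* Note: like the ethtool stats path, these counters come from a firmware
 * stats dump (vnic_dev_stats_dump) issued under devcmd_lock; only
 * rq_truncated_pkts and rq_bad_fcs are maintained by the driver itself.
 */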
static void enic_reset_mcaddrs(struct enic *enic)
{
	enic->mc_count = 0;
}
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}
/* netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct dev_mc_list *list;
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
	unsigned int mc_count = netdev_mc_count(netdev);
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;

	spin_lock(&enic->devcmd_lock);

	if (enic->flags != flags) {
		enic->flags = flags;
		vnic_dev_packet_filter(enic->vdev, directed,
			multicast, broadcast, promisc, allmulti);
	}

	/* Is there an easier way?  Trying to minimize to
	 * calls to add/del multicast addrs.  We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(list, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_del_multicast_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_add_multicast_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;

	spin_unlock(&enic->devcmd_lock);
}
/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);

	enic->vlan_group = vlan_group;
}
/* rtnl lock is held */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_add_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
/* rtnl lock is held */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_del_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}
*rq
)
961 struct rq_enet_desc
*desc
= vnic_rq_next_desc(rq
);
963 if (vnic_rq_posting_soon(rq
)) {
965 /* SW workaround for A0 HW erratum: if we're just about
966 * to write posted_index, insert a dummy desc
970 rq_enet_desc_enc(desc
, 0, RQ_ENET_TYPE_RESV2
, 0);
971 vnic_rq_post(rq
, 0, 0, 0, 0);
973 return enic_rq_alloc_buf(rq
);
static int enic_set_rq_alloc_buf(struct enic *enic)
{
	enum vnic_dev_hw_version hw_ver;
	int err;

	err = vnic_dev_hw_version(enic->vdev, &hw_ver);
	if (err)
		return err;

	switch (hw_ver) {
	case VNIC_DEV_HW_VER_A1:
		enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
		break;
	case VNIC_DEV_HW_VER_A2:
	case VNIC_DEV_HW_VER_UNKNOWN:
		enic->rq_alloc_buf = enic_rq_alloc_buf;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
	void **tcph, u64 *hdr_flags, void *priv)
{
	struct cq_enet_rq_desc *cq_desc = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	cq_enet_rq_desc_dec(cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (!(ipv4 && tcp && !ipv4_fragment))
		return -1;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*tcph = tcp_hdr(skb);
	*iphdr = iph;

	return 0;
}
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (enic->vlan_group && vlan_stripped) {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);

		} else {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);
		}

	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return rq_work_done;
}
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return work_done;
}
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}
static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
			"%.11s-rx-0", netdev->name);
		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
		enic->msix[ENIC_MSIX_RQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
			"%.11s-tx-0", netdev->name);
		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
		enic->msix[ENIC_MSIX_WQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
			"%.11s-err", netdev->name);
		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
		enic->msix[ENIC_MSIX_ERR].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
			"%.11s-notify", netdev->name);
		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}
static int enic_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
			netdev->name);
		return err;
	}

	err = enic_notify_set(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to alloc notify buffer, aborting.\n",
			netdev->name);
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			printk(KERN_ERR PFX
				"%s: Unable to alloc receive buffers.\n",
				netdev->name);
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	spin_lock(&enic->devcmd_lock);
	enic_add_station_addr(enic);
	spin_unlock(&enic->devcmd_lock);
	enic_set_multicast_list(netdev);

	netif_wake_queue(netdev);
	napi_enable(&enic->napi);
	spin_lock(&enic->devcmd_lock);
	vnic_dev_enable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_mask(&enic->intr[i]);

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	spin_lock(&enic->devcmd_lock);
	vnic_dev_disable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	napi_disable(&enic->napi);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		printk(KERN_WARNING PFX
			"%s: interface MTU (%d) set higher "
			"than port MTU (%d)\n",
			netdev->name, netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		enic_isr_msix_rq(enic->pdev->irq, enic);
		enic_isr_msix_wq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}
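/* enic_dev_wait() is a small polling helper used by the device open and
 * soft-reset paths below: it kicks off a devcmd via start(), then polls
 * finished() every 100 ms for up to 2 seconds.
 */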
static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		printk(KERN_ERR PFX
			"vNIC device open failed, err %d.\n", err);

	return err;
}
static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
		vnic_dev_soft_reset_done, 0);
	if (err)
		printk(KERN_ERR PFX
			"vNIC soft reset failed, err %d.\n", err);

	return err;
}
static int enic_set_niccfg(struct enic *enic)
{
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = 0;
	const u8 rss_hash_bits = 0;
	const u8 rss_base_cpu = 0;
	const u8 rss_enable = 0;
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;

	/* Enable VLAN tag stripping.  RSS not enabled (yet).
	 */

	return enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
}
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->devcmd_lock);
	vnic_dev_hang_notify(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	vnic_dev_init(enic->vdev, 0);
	enic_reset_mcaddrs(enic);
	enic_init_vnic_resources(enic);
	enic_set_niccfg(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = 1;
	unsigned int m = 1;
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * system capabilities.
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	/* We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
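/* Interrupt mode selection above falls back in order: MSI-X (separate WQ,
 * RQ, error and notify vectors), then single-vector MSI, then legacy INTx
 * with three interrupt resources.  enic->config.intr_mode, taken from the
 * vNIC configuration, caps how far down this list the driver may go.
 */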
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
static const struct net_device_ops enic_netdev_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_set_multicast_list = enic_set_multicast_list,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_register = enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};
void enic_dev_deinit(struct enic *enic)
{
	netif_napi_del(&enic->napi);
	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}
int enic_dev_init(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	int err;

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Get vNIC configuration failed, aborting.\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set intr mode based on resource "
			"counts and system capabilities, aborting.\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to alloc vNIC resources, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rq_alloc_buf(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set RQ buffer allocator, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	err = enic_set_niccfg(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to config nic, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct enic *enic;
	unsigned int i;
	int err;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot enable PCI device, aborting.\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot request PCI regions, aborting.\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX
				"No usable DMA configuration, aborting.\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 32-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 40-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			printk(KERN_ERR PFX
				"Cannot memory-map BAR %d, aborting.\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		printk(KERN_ERR PFX
			"vNIC registration failed, aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		printk(KERN_ERR PFX
			"vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		printk(KERN_ERR PFX
			"vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = enic_dev_init(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Device initialization failed, aborting.\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	spin_lock_init(&enic->devcmd_lock);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX
			"Invalid MAC address, aborting.\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	netdev->netdev_ops = &enic_netdev_ops;
	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, LRO))
		netdev->features |= NETIF_F_LRO;
	netdev->features |= NETIF_F_HIGHDMA;

	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
	enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
	enic->lro_mgr.lro_arr = enic->lro_desc;
	enic->lro_mgr.get_skb_header = enic_get_skb_header;
	enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	enic->lro_mgr.dev = netdev;
	enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
	enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot register net device, aborting.\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		flush_scheduled_work();
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};
static int __init enic_init_module(void)
{
	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);