/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "enic_res.h"
#include "enic.h"
#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET	0x0043	/* ethernet vnic */
/* Supported devices */
static struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ 0, }	/* end of table */
};
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
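
/* Each ethtool statistic below is described by a name plus the offset,
 * in u64 words, of the matching counter inside the vnic_tx_stats and
 * vnic_rx_stats blocks returned by vnic_dev_stats_dump().
 */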
struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
};
static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ecmd->speed = vnic_dev_port_speed(enic->vdev);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_fw_info(enic->vdev, &fw_info);
	spin_unlock(&enic->devcmd_lock);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}
static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	for (i = 0; i < enic_n_tx_stats; i++) {
		memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < enic_n_rx_stats; i++) {
		memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
}
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	return enic_n_tx_stats + enic_n_rx_stats;
}
static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &vstats);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}
static u32 enic_get_rx_csum(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->csum_rx_enabled;
}
static int enic_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, RXCSUM))
		return -EINVAL;

	enic->csum_rx_enabled = !!data;

	return 0;
}
static int enic_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TXCSUM))
		return -EINVAL;

	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}
static int enic_set_tso(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TSO))
		return -EINVAL;

	if (data)
		netdev->features |=
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	else
		netdev->features &=
			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);

	return 0;
}
static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}
static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_rx_csum = enic_get_rx_csum,
	.set_rx_csum = enic_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = enic_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = enic_set_tso,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}
static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}
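
/* Per-WQ send-completion handler: walks completed descriptors under the
 * WQ lock, unmaps and frees their buffers, and re-wakes the TX queue once
 * at least MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors are free again.
 */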
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}
}
static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
		netif_carrier_off(enic->netdev);
	}
}
static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);

	if (mtu && mtu != enic->port_mtu) {
		if (mtu < enic->netdev->mtu)
			printk(KERN_WARNING PFX
				"%s: interface MTU (%d) set higher "
				"than switch port MTU (%d)\n",
				enic->netdev->name, enic->netdev->mtu, mtu);
		enic->port_mtu = mtu;
	}
}
static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
			enic->netdev->name, enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}
static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}
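
/* Legacy INTx: a single shared IRQ.  The PBA (pending bits array) read
 * from enic->legacy_pba says which of the notify, error, or WQ/RQ
 * interrupt sources fired; each bit is tested with ENIC_TEST_INTR().
 */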
#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (napi_schedule_prep(&enic->napi))
			__napi_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct enic *enic = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}
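
/* Transmit path helpers: each sk_buff is DMA-mapped and queued to the WQ
 * as one or more descriptors (plain, L4-checksum-offload, or TSO), with
 * any page fragments appended by enic_queue_wq_skb_cont().
 */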
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0));	/* EOP? */
	}
}
static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}
static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_transport_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
			len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left));
		frag_len_left -= len;
		offset += len;
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left));	/* EOP? */
			frag_len_left -= len;
			offset += len;
		}
	}
}
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;

	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	struct vnic_stats *stats;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &stats);
	spin_unlock(&enic->devcmd_lock);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}
static void enic_reset_mcaddrs(struct enic *enic)
{
	enic->mc_count = 0;
}
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}
/* netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct dev_mc_list *list = netdev->mc_list;
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		(netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int mc_count = netdev->mc_count;
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;

	spin_lock(&enic->devcmd_lock);

	vnic_dev_packet_filter(enic->vdev, directed,
		multicast, broadcast, promisc, allmulti);

	/* Is there an easier way?  Trying to minimize to
	 * calls to add/del multicast addrs.  We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	for (i = 0; list && i < mc_count; i++) {
		memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
		list = list->next;
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_del_multicast_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_add_multicast_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;

	spin_unlock(&enic->devcmd_lock);
}
/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);

	enic->vlan_group = vlan_group;
}

/* rtnl lock is held */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_add_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}

/* rtnl lock is held */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_del_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);

	schedule_work(&enic->reset);
}
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}
static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev,
	unsigned int size)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);

	return skb;
}
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = enic_rq_alloc_skb(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}
static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
{
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);

	if (vnic_rq_posting_soon(rq)) {

		/* SW workaround for A0 HW erratum: if we're just about
		 * to write posted_index, insert a dummy desc
		 */

		rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
		vnic_rq_post(rq, 0, 0, 0, 0);
	} else {
		return enic_rq_alloc_buf(rq);
	}

	return 0;
}
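
/* Pick the RQ buffer allocator based on the adapter hardware revision;
 * A1 parts need the dummy-descriptor workaround above.
 */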
static int enic_set_rq_alloc_buf(struct enic *enic)
{
	enum vnic_dev_hw_version hw_ver;
	int err;

	err = vnic_dev_hw_version(enic->vdev, &hw_ver);
	if (err)
		return err;

	switch (hw_ver) {
	case VNIC_DEV_HW_VER_A1:
		enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
		break;
	case VNIC_DEV_HW_VER_A2:
	case VNIC_DEV_HW_VER_UNKNOWN:
		enic->rq_alloc_buf = enic_rq_alloc_buf;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
	void **tcph, u64 *hdr_flags, void *priv)
{
	struct cq_enet_rq_desc *cq_desc = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	cq_enet_rq_desc_dec(cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (!(ipv4 && tcp && !ipv4_fragment))
		return -1;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*tcph = tcp_hdr(skb);
	*iphdr = iph;

	return 0;
}
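
/* Receive completion handler: decodes the CQ descriptor, charges error
 * counters for bad-FCS/truncated frames, and hands good frames to LRO,
 * VLAN-accelerated receive, or netif_receive_skb() as appropriate.
 */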
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (bytes_written > 0)
			enic->rq_bad_fcs++;
		else if (bytes_written == 0)
			enic->rq_truncated_pkts++;

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (enic->vlan_group && vlan_stripped) {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);

		} else {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);
		}
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}
static void enic_rq_drop_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb = buf->os_buf;

	if (skipped)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	dev_kfree_skb_any(skb);
}
static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_drop_buf, opaque);

	return 0;
}
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * a WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	if (rq_work_done > 0) {

		/* Replenish RQ
		 */

		vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	} else {

		/* If no work done, flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return rq_work_done;
}
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int work_to_do = budget;
	unsigned int work_done;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		work_to_do, enic_rq_service, NULL);

	if (work_done > 0) {

		/* Replenish RQ
		 */

		vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

		/* Return intr event credits for this polling
		 * cycle.  An intr event is the completion of a
		 * RQ packet.
		 */

		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	} else {

		/* If no work done, flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return work_done;
}
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}
static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
			"%.11s-rx-0", netdev->name);
		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
		enic->msix[ENIC_MSIX_RQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
			"%.11s-tx-0", netdev->name);
		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
		enic->msix[ENIC_MSIX_WQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
			"%.11s-err", netdev->name);
		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
		enic->msix[ENIC_MSIX_ERR].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
			"%.11s-notify", netdev->name);
		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
static int enic_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
			netdev->name);
		return err;
	}

	err = enic_notify_set(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to alloc notify buffer, aborting.\n",
			netdev->name);
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
		if (err) {
			printk(KERN_ERR PFX
				"%s: Unable to alloc receive buffers.\n",
				netdev->name);
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	spin_lock(&enic->devcmd_lock);
	enic_add_station_addr(enic);
	spin_unlock(&enic->devcmd_lock);
	enic_set_multicast_list(netdev);

	netif_wake_queue(netdev);
	napi_enable(&enic->napi);
	spin_lock(&enic->devcmd_lock);
	vnic_dev_enable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	del_timer_sync(&enic->notify_timer);

	spin_lock(&enic->devcmd_lock);
	vnic_dev_disable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	napi_disable(&enic->napi);
	netif_stop_queue(netdev);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_mask(&enic->intr[i]);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	enic_free_intr(enic);

	(void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		-1, enic_rq_service_drop, NULL);
	(void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		-1, enic_wq_service, NULL);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		printk(KERN_WARNING PFX
			"%s: interface MTU (%d) set higher "
			"than port MTU (%d)\n",
			netdev->name, netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		enic_isr_msix_rq(enic->pdev->irq, enic);
		enic_isr_msix_wq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}
static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		printk(KERN_ERR PFX
			"vNIC device open failed, err %d.\n", err);

	return err;
}

static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
		vnic_dev_soft_reset_done, 0);
	if (err)
		printk(KERN_ERR PFX
			"vNIC soft reset failed, err %d.\n", err);

	return err;
}
static int enic_set_niccfg(struct enic *enic)
{
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = 0;
	const u8 rss_hash_bits = 0;
	const u8 rss_base_cpu = 0;
	const u8 rss_enable = 0;
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;

	/* Enable VLAN tag stripping.  RSS not enabled (yet).
	 */

	return enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
}
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->devcmd_lock);
	vnic_dev_hang_notify(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	vnic_dev_init(enic->vdev, 0);
	enic_reset_mcaddrs(enic);
	enic_init_vnic_resources(enic);
	enic_set_niccfg(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}
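
/* Select the interrupt mode: try MSI-X first (separate RQ, WQ, error and
 * notify vectors), then single-vector MSI, then legacy INTx, constrained
 * by the resource counts the adapter advertises and config.intr_mode.
 */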
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = 1;
	unsigned int m = 1;
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats		= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_multicast_list	= enic_set_multicast_list,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_register	= enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
};
void enic_dev_deinit(struct enic *enic)
{
	netif_napi_del(&enic->napi);
	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}
int enic_dev_init(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	int err;

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Get vNIC configuration failed, aborting.\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set intr mode, aborting.\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to alloc vNIC resources, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rq_alloc_buf(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set RQ buffer allocator, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	err = enic_set_niccfg(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to config nic, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
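
/* PCI probe: map the BARs, register the vNIC device, open and init it,
 * then set up the net_device (ops, features, LRO manager) and register it.
 */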
static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot enable PCI device, aborting.\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot request PCI regions, aborting.\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 40-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX
				"No usable DMA configuration, aborting.\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 32-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 40-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			printk(KERN_ERR PFX
				"Cannot memory-map BAR %d, aborting.\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		printk(KERN_ERR PFX
			"vNIC registration failed, aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		printk(KERN_ERR PFX
			"vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		printk(KERN_ERR PFX
			"vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = enic_dev_init(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Device initialization failed, aborting.\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	spin_lock_init(&enic->devcmd_lock);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX
			"Invalid MAC address, aborting.\n");
		goto err_out_dev_deinit;
	}

	netdev->netdev_ops = &enic_netdev_ops;
	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, LRO))
		netdev->features |= NETIF_F_LRO;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
	enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
	enic->lro_mgr.lro_arr = enic->lro_desc;
	enic->lro_mgr.get_skb_header = enic_get_skb_header;
	enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	enic->lro_mgr.dev = netdev;
	enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
	enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot register net device, aborting.\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		flush_scheduled_work();
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};
static int __init enic_init_module(void)
{
	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);