/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.1.18"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]			= &ixgbe_82598_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	struct net_device *netdev = adapter->netdev;
	return netdev->name;
}
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
			   u8 msix_vector)
{
	u32 ivar, index;

	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
	index = (int_alloc_entry >> 2) & 0x1F;
	ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}
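
/*
 * Worked example of the IVAR math above (derived from the shifts in
 * ixgbe_set_ivar, not from datasheet text): each 32-bit IVAR register
 * holds four 8-bit allocation entries, so int_alloc_entry = 5 selects
 * index = (5 >> 2) & 0x1F = 1 and byte 5 & 0x3 = 1, i.e. bits 15:8 of
 * IVAR(1).
 */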
static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
					     struct ixgbe_tx_buffer
					     *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
			       tx_buffer_info->dma,
			       tx_buffer_info->length, PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	/* tx_buffer_info must be completely set up in the transmit path */
}
static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring,
				       unsigned int eop,
				       union ixgbe_adv_tx_desc *eop_desc)
{
	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of i */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].dma &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  TDH                  <%x>\n"
			"  TDT                  <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  next_to_watch        <%x>\n"
			"  jiffies              <%lx>\n"
			"  next_to_watch.status <%x>\n",
			readl(adapter->hw.hw_addr + tx_ring->head),
			readl(adapter->hw.hw_addr + tx_ring->tail),
			tx_ring->next_to_use,
			tx_ring->next_to_clean,
			tx_ring->tx_buffer_info[eop].time_stamp,
			eop, jiffies, eop_desc->wb.status);
		return true;
	}

	return false;
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */
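
/*
 * Worked example of the arithmetic above: TXD_USE_COUNT(16384) =
 * (16384 >> 14) + 0 = 1, and with 4 KB pages TXD_USE_COUNT(PAGE_SIZE) is
 * also 1.  Assuming a typical MAX_SKB_FRAGS of 18, DESC_NEEDED comes to
 * 1 + 18 * 1 + 1 = 20 descriptors reserved per worst-case transmit.
 */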
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop;
	bool cleaned = false;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
		for (cleaned = false; !cleaned;) {
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);

			tx_ring->stats.bytes += tx_buffer_info->length;
			if (cleaned) {
				struct sk_buff *skb = tx_buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}
			ixgbe_unmap_and_free_tx_resource(adapter,
							 tx_buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		tx_ring->stats.packets++;

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

		/* weight of a sort for tx, avoid endless transmit cleanup */
		if (total_tx_packets >= tx_ring->work_limit)
			break;
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (total_tx_packets && netif_carrier_ok(netdev) &&
	    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_queue(netdev);
			adapter->restart_queue++;
		}
	}

	if (adapter->detect_tx_hung)
		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
			netif_stop_queue(netdev);

	if (total_tx_packets >= tx_ring->work_limit)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);

	adapter->net_stats.tx_bytes += total_tx_bytes;
	adapter->net_stats.tx_packets += total_tx_packets;
	cleaned = total_tx_packets ? true : false;
	return cleaned;
}
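
/*
 * Design note on the work_limit handling above: rather than looping until
 * the ring is empty, the cleanup routine bails out after work_limit
 * packets and writes the ring's bit into EICS, causing the hardware to
 * re-raise the interrupt so the remaining work is picked up on the next
 * pass.  This bounds the time spent in any single invocation.
 */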
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @is_vlan: packet has a VLAN tag
 * @tag: VLAN tag from descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
			      struct sk_buff *skb, bool is_vlan,
			      u16 tag)
{
	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan)
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
		else
			netif_receive_skb(skb);
	} else {
		if (adapter->vlgrp && is_vlan)
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     u32 status_err,
				     struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set, or rx csum disabled */
	if ((status_err & IXGBE_RXD_STAT_IXSM) ||
	    !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}
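
/*
 * Note: setting ip_summed to CHECKSUM_UNNECESSARY above tells the stack
 * that the hardware already validated the L4 checksum, so the kernel
 * skips its software verification; any IP or TCP error bit instead
 * leaves the default CHECKSUM_NONE and bumps hw_csum_rx_error.
 */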
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!rx_buffer_info->page &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			rx_buffer_info->page = alloc_page(GFP_ATOMIC);
			if (!rx_buffer_info->page) {
				adapter->alloc_rx_page_failed++;
				goto no_buffers;
			}
			rx_buffer_info->page_dma =
			    pci_map_page(pdev, rx_buffer_info->page,
					 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
		}

		if (!rx_buffer_info->skb) {
			skb = netdev_alloc_skb(netdev, bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			rx_buffer_info->skb = skb;
			rx_buffer_info->dma = pci_map_single(pdev, skb->data,
							     bufsz,
							     PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr =
			    cpu_to_le64(rx_buffer_info->page_dma);
			rx_desc->read.hdr_addr =
					cpu_to_le64(rx_buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
					cpu_to_le64(rx_buffer_info->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];
	}
no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 upper_len, len, staterr;
	u16 hdr_info, vlan_tag;
	bool is_vlan, cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	upper_len = 0;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];
	is_vlan = (staterr & IXGBE_RXD_STAT_VP);
	vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info =
			    le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
			len =
			    ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			     IXGBE_RXDADV_HDRBUFLEN_SHIFT);
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (len && !skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 adapter->rx_buf_len + NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page, 0, upper_len);
			rx_buffer_info->page = NULL;

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_buffer = &rx_ring->rx_buffer_info[i];

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);

		cleaned_count++;
		if (staterr & IXGBE_RXD_STAT_EOP) {
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			rx_buffer_info->skb = next_buffer->skb;
			rx_buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, netdev);
		ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
		netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
		is_vlan = (staterr & IXGBE_RXD_STAT_VP);
		vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}
#define IXGBE_MAX_INTR 10
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	int i, vector = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i),
			       IXGBE_MSIX_VECTOR(vector));
		writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr),
		       adapter->hw.hw_addr + adapter->tx_ring[i].itr_register);
		vector++;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i),
			       IXGBE_MSIX_VECTOR(vector));
		writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr),
		       adapter->hw.hw_addr + adapter->rx_ring[i].itr_register);
		vector++;
	}

	vector = adapter->num_tx_queues + adapter->num_rx_queues;
	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
		       IXGBE_MSIX_VECTOR(vector));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950);
}
static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (eicr & IXGBE_EICR_LSC) {
		adapter->lsc_int++;
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_ring *txr = data;
	struct ixgbe_adapter *adapter = txr->adapter;

	ixgbe_clean_tx_irq(adapter, txr);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_ring *rxr = data;
	struct ixgbe_adapter *adapter = rxr->adapter;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value);
	netif_rx_schedule(adapter->netdev, &adapter->napi);
	return IRQ_HANDLED;
}

static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_adapter *adapter = container_of(napi,
					struct ixgbe_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int work_done = 0;
	struct ixgbe_ring *rxr = adapter->rx_ring;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(netdev))
		goto quit_polling;

	ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if ((work_done < budget) || !netif_running(netdev)) {
quit_polling:
		netif_rx_complete(netdev, napi);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
					rxr->eims_value);
	}

	return work_done;
}
/**
 * ixgbe_setup_msix - Initialize MSI-X interrupts
 *
 * ixgbe_setup_msix allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_setup_msix(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, int_vector = 0, err = 0;
	int max_msix_count;

	/* +1 for the LSC interrupt */
	max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1;
	adapter->msix_entries = kcalloc(max_msix_count,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < max_msix_count; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			      max_msix_count);
	if (err)
		goto out;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i);
		err = request_irq(adapter->msix_entries[int_vector].vector,
				  &ixgbe_msix_clean_tx,
				  0,
				  adapter->tx_ring[i].name,
				  &(adapter->tx_ring[i]));
		if (err) {
			DPRINTK(PROBE, ERR,
				"request_irq failed for MSIX interrupt "
				"Error: %d\n", err);
			goto release_irqs;
		}
		adapter->tx_ring[i].eims_value =
		    (1 << IXGBE_MSIX_VECTOR(int_vector));
		adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector);
		int_vector++;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (strlen(netdev->name) < (IFNAMSIZ - 5))
			sprintf(adapter->rx_ring[i].name,
				"%s-rx%d", netdev->name, i);
		else
			memcpy(adapter->rx_ring[i].name,
			       netdev->name, IFNAMSIZ);
		err = request_irq(adapter->msix_entries[int_vector].vector,
				  &ixgbe_msix_clean_rx, 0,
				  adapter->rx_ring[i].name,
				  &(adapter->rx_ring[i]));
		if (err) {
			DPRINTK(PROBE, ERR,
				"request_irq failed for MSIX interrupt "
				"Error: %d\n", err);
			goto release_irqs;
		}

		adapter->rx_ring[i].eims_value =
		    (1 << IXGBE_MSIX_VECTOR(int_vector));
		adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector);
		int_vector++;
	}

	sprintf(adapter->lsc_name, "%s-lsc", netdev->name);
	err = request_irq(adapter->msix_entries[int_vector].vector,
			  &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
			"request_irq for msix_lsc failed: %d\n", err);
		goto release_irqs;
	}

	/* FIXME: implement netif_napi_remove() instead */
	adapter->napi.poll = ixgbe_clean_rxonly;
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
	return 0;

release_irqs:
	int_vector--;
	for (; int_vector >= adapter->num_tx_queues; int_vector--)
		free_irq(adapter->msix_entries[int_vector].vector,
			 &(adapter->rx_ring[int_vector -
					    adapter->num_tx_queues]));

	for (; int_vector >= 0; int_vector--)
		free_irq(adapter->msix_entries[int_vector].vector,
			 &(adapter->tx_ring[int_vector]));
out:
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	return err;
}
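
/*
 * Example vector layout produced by ixgbe_setup_msix (inferred from the
 * request loops above): with one Tx queue and four Rx queues, MSI-X
 * entry 0 services eth0-tx0, entries 1-4 service eth0-rx0..eth0-rx3,
 * and entry 5 handles the link state change (eth0-lsc) interrupt.
 */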
/**
 * ixgbe_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (!eicr)
		return IRQ_NONE;	/* Not our interrupt */

	if (eicr & IXGBE_EICR_LSC) {
		adapter->lsc_int++;
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies);
	}
	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		/* Disable interrupts and register for poll. The flush of the
		 * posted write is intentionally left out. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
		__netif_rx_schedule(netdev, &adapter->napi);
	}

	return IRQ_HANDLED;
}
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues)
{
	struct net_device *netdev = adapter->netdev;
	int flags, err;
	irq_handler_t handler = ixgbe_intr;

	flags = IRQF_SHARED;

	err = ixgbe_setup_msix(adapter);
	if (!err)
		goto request_done;

	/*
	 * if we can't do MSI-X, fall through and try MSI
	 * No need to reallocate memory since we're decreasing the number of
	 * queues. We just won't use the other ones, also it is freed correctly
	 * on ixgbe_remove.
	 */
	*num_rx_queues = 1;

	/* do MSI */
	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
		flags &= ~IRQF_SHARED;
		handler = &ixgbe_intr;
	}

	err = request_irq(adapter->pdev->irq, handler, flags,
			  netdev->name, netdev);
	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

request_done:
	return err;
}
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;

		for (i = 0; i < adapter->num_tx_queues; i++)
			free_irq(adapter->msix_entries[i].vector,
				 &(adapter->tx_ring[i]));
		for (i = 0; i < adapter->num_rx_queues; i++)
			free_irq(adapter->msix_entries[i +
						adapter->num_tx_queues].vector,
				 &(adapter->rx_ring[i]));
		i = adapter->num_rx_queues + adapter->num_tx_queues;
		free_irq(adapter->msix_entries[i].vector, netdev);
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
	}
}
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
				(IXGBE_EIMS_ENABLE_MASK &
				 ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
			IXGBE_EIMS_ENABLE_MASK);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}
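
/*
 * Note on the EIAC write above: queue interrupt causes are placed in
 * auto-clear mode, while the OTHER and LSC causes are deliberately
 * excluded, which is why ixgbe_msix_lsc re-arms itself by writing
 * IXGBE_EIMS_OTHER back to EIMS at the end of the handler.
 */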
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	if (adapter->rx_eitr)
		IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
				EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));

	/* for re-triggering the interrupt in non-NAPI mode */
	adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
	adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));

	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i);
}
/**
 * ixgbe_configure_tx - Configure 82598 Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, tdlen;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		tdba = adapter->tx_ring[i].dma;
		tdlen = adapter->tx_ring[i].count *
		    sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(i);
		adapter->tx_ring[i].tail = IXGBE_TDT(i);
	}

	IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT);
}
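
/*
 * Sizing example for the ring programming above (assuming the 16-byte
 * advanced Tx descriptor layout): a ring of 1024 descriptors programs
 * TDLEN with 1024 * sizeof(union ixgbe_adv_tx_desc) = 16384 bytes.
 */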
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
/**
 * ixgbe_configure_rx - Configure 82598 Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rdlen, rxctrl, rxcsum;
	u32 random[10];
	u32 reta, mrqc;
	int i;
	u32 fctrl, hlreg0;
	u32 srrctl;
	int pages;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN)
		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	else
		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
	} else {
		if (netdev->mtu <= ETH_DATA_LEN)
			adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			adapter->rx_buf_len = ALIGN(max_frame, 1024);
	}

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	else
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	pages = PAGE_USE_COUNT(adapter->netdev->mtu);

	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBE_RX_HDR_SIZE <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |=
			     IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |=
			     adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
		adapter->rx_ring[i].head = IXGBE_RDH(i);
		adapter->rx_ring[i].tail = IXGBE_RDT(i);
	}

	if (adapter->num_rx_queues > 1) {
		/* Random 40bytes used as random key in RSS hash function */
		get_random_bytes(&random[0], 40);

		switch (adapter->num_rx_queues) {
		case 8:
		case 4:
			/* Bits [3:0] in each byte refers the Rx queue no */
			reta = 0x00010203;
			break;
		case 2:
			reta = 0x00010001;
			break;
		default:
			reta = 0x00000000;
			break;
		}

		/* Fill out redirection table */
		for (i = 0; i < 32; i++) {
			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta);
			if (adapter->num_rx_queues > 4) {
				i++;
				IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i,
						      0x04050607);
			}
		}

		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]);

		mrqc = IXGBE_MRQC_RSSEN
		    /* Perform hash on these packet types */
		    | IXGBE_MRQC_RSS_FIELD_IPV4
		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
		    | IXGBE_MRQC_RSS_FIELD_IPV6
		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

		/* Multiqueue and packet checksumming are mutually exclusive. */
		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
		rxcsum |= IXGBE_RXCSUM_PCSD;
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
	} else {
		/* Enable Receive Checksum Offload for TCP and UDP */
		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
		if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
			/* Enable IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split. */
			rxcsum |= IXGBE_RXCSUM_IPPCSE;
		} else {
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
	}
	/* Enable Receives */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
}
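
/*
 * Note on the RSS programming above: the redirection-table loop writes
 * IXGBE_RETA(0) 32 times with four queue indices packed per 32-bit word,
 * i.e. a 128-entry table, and the ten RSSRK writes load the 40-byte
 * random hash key generated by get_random_bytes().
 */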
static void ixgbe_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* add VID to filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);

	/* remove VID from filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, false);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
/**
 * ixgbe_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void ixgbe_set_multi(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u8 *mta_list;
	u32 fctrl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	if (netdev->flags & IFF_PROMISC) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	} else if (netdev->flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else {
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	if (netdev->mc_count) {
		mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
		if (!mta_list)
			return;

		/* Shared function expects packed array of only addresses. */
		mc_ptr = netdev->mc_list;

		for (i = 0; i < netdev->mc_count; i++) {
			if (!mc_ptr)
				break;
			memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
			       ETH_ALEN);
			mc_ptr = mc_ptr->next;
		}

		ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
		kfree(mta_list);
	} else {
		ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
	}
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbe_set_multi(netdev);

	ixgbe_restore_vlan(adapter);

	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
				       (adapter->rx_ring[i].count - 1));
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;
	u32 gpie = 0;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl, rxdctl, mhadd;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	ixgbe_get_hw_control(adapter);

	if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
			      IXGBE_FLAG_MSI_ENABLED)) {
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
				IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
		} else {
			/* MSI only */
			gpie = (IXGBE_GPIE_EIAME |
				IXGBE_GPIE_PBA_SUPPORT);
		}
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
		gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	}

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);

	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
	}
	/* enable all receives */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	clear_bit(__IXGBE_DOWN, &adapter->state);
	napi_enable(&adapter->napi);
	ixgbe_irq_enable(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);
	ixgbe_down(adapter);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}

int ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	return ixgbe_up_complete(adapter);
}
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	if (ixgbe_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
}
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 err, num_rx_queues = adapter->num_rx_queues;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
				"suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = ixgbe_request_irq(adapter, &num_rx_queues);
		if (err)
			return err;
	}

	ixgbe_reset(adapter);

	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 adapter->rx_buf_len,
					 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			dev_kfree_skb(rx_buffer_info->skb);
			rx_buffer_info->skb = NULL;
		}
		if (!rx_buffer_info->page)
			continue;
		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		rx_buffer_info->page_dma = 0;

		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 rxctrl;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);

	/* disable receives */
	rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
			rxctrl & ~IXGBE_RXCTRL_RXEN);

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */

	/* flush both disables */
	IXGBE_WRITE_FLUSH(&adapter->hw);
	msleep(10);

	napi_disable(&adapter->napi);

	ixgbe_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	ixgbe_reset(adapter);
	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);
}
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
	}

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	ixgbe_suspend(pdev, PMSG_SUSPEND);
}
/**
 * ixgbe_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/
static int ixgbe_clean(struct napi_struct *napi, int budget)
{
	struct ixgbe_adapter *adapter = container_of(napi,
					struct ixgbe_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int tx_cleaned = 0, work_done = 0;

	/* In non-MSIX case, there is no multi-Tx/Rx queue */
	tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
	ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done,
			   budget);

	if (tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(netdev, napi);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter);
	}

	return work_done;
}
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbe_reset_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter;
	adapter = container_of(work, struct ixgbe_adapter, reset_task);

	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}
/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].adapter = adapter;
		adapter->rx_ring[i].itr_register = IXGBE_EITR(i);
		adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
	}

	return 0;
}
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* default flow control settings */
	hw->fc.original_type = ixgbe_fc_full;
	hw->fc.type = ixgbe_fc_full;

	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
	if (hw->mac.ops.reset(hw)) {
		dev_err(&pdev->dev, "HW Init failed\n");
		return -EIO;
	}
	if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
					 false)) {
		dev_err(&pdev->dev, "Link Speed setup failed\n");
		return -EIO;
	}

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* Set the default values */
	adapter->num_rx_queues = IXGBE_DEFAULT_RXQ;
	adapter->num_tx_queues = 1;
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	if (ixgbe_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
	txdr->tx_buffer_info = vmalloc(size);
	if (!txdr->tx_buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
		vfree(txdr->tx_buffer_info);
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the tx desc ring\n");
		return -ENOMEM;
	}

	txdr->adapter = adapter;
	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	txdr->work_limit = txdr->count;

	return 0;
}
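
/*
 * Worked example of the rounding above: 384 descriptors * 16 bytes per
 * union ixgbe_adv_tx_desc = 6144 bytes, which ALIGN(..., 4096) rounds up
 * to 8192 so the DMA region always spans whole 4 KB pages.
 */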
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
	rxdr->rx_buffer_info = vmalloc(size);
	if (!rxdr->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
			"vmalloc allocation failed for the rx desc ring\n");
		return -ENOMEM;
	}
	memset(rxdr->rx_buffer_info, 0, size);

	desc_len = sizeof(union ixgbe_adv_rx_desc);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the rx desc ring\n");
		vfree(rxdr->rx_buffer_info);
		return -ENOMEM;
	}

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->adapter = adapter;

	return 0;
}
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
/**
 * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Tx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}

/**
 * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Rx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
	    (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
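
/*
 * Frame-size arithmetic for the bounds check above: a standard 1500-byte
 * MTU yields max_frame = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518,
 * while anything above IXGBE_MAX_JUMBO_FRAME_SIZE is rejected before the
 * device is reinitialized.
 */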
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;
	u32 num_rx_queues = adapter->num_rx_queues;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

try_intr_reinit:
	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		num_rx_queues = 1;
		adapter->num_rx_queues = num_rx_queues;
	}

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter, &num_rx_queues);
	if (err)
		goto err_req_irq;

	/* ixgbe_request might have reduced num_rx_queues */
	if (num_rx_queues < adapter->num_rx_queues) {
		/* We didn't get MSI-X, so we need to release everything,
		 * set our Rx queue count to num_rx_queues, and redo the
		 * whole init process.
		 */
		ixgbe_free_irq(adapter);
		if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
			pci_disable_msi(adapter->pdev);
			adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		}
		ixgbe_free_all_rx_resources(adapter);
		ixgbe_free_all_tx_resources(adapter);
		adapter->num_rx_queues = num_rx_queues;

		/* Reset the hardware, and start over. */
		ixgbe_reset(adapter);

		goto try_intr_reinit;
	}

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
	ixgbe_free_all_rx_resources(adapter);
err_setup_rx:
	ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
	ixgbe_reset(adapter);

	return err;
}
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}
2047 * ixgbe_update_stats - Update the board statistics counters.
2048 * @adapter: board private structure
2050 void ixgbe_update_stats(struct ixgbe_adapter
*adapter
)
2052 struct ixgbe_hw
*hw
= &adapter
->hw
;
2054 u32 i
, missed_rx
= 0, mpc
, bprc
, lxon
, lxoff
, xon_off_tot
;
2056 adapter
->stats
.crcerrs
+= IXGBE_READ_REG(hw
, IXGBE_CRCERRS
);
2057 for (i
= 0; i
< 8; i
++) {
2058 /* for packet buffers not used, the register should read 0 */
2059 mpc
= IXGBE_READ_REG(hw
, IXGBE_MPC(i
));
2061 adapter
->stats
.mpc
[i
] += mpc
;
2062 total_mpc
+= adapter
->stats
.mpc
[i
];
2063 adapter
->stats
.rnbc
[i
] += IXGBE_READ_REG(hw
, IXGBE_RNBC(i
));
2065 adapter
->stats
.gprc
+= IXGBE_READ_REG(hw
, IXGBE_GPRC
);
2066 /* work around hardware counting issue */
2067 adapter
->stats
.gprc
-= missed_rx
;
	/* 82598 hardware only has a 32 bit counter in the high register */
	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	/* broadcast frames are also counted in MPRC, so back them out */
	adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
	adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
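	/*
	 * Each XON/XOFF pause frame is a minimum-size Ethernet frame:
	 * ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 octets on the wire, which
	 * is why the byte counter above is reduced by 64 per frame sent.
	 */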
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
				       adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = total_mpc;
}
/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	bool link_up;
	u32 link_speed = 0;

	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
			u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
			DPRINTK(LINK, INFO, "NIC Link is Up %s, "
				"Flow Control: %s\n",
				(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
				 "10 Gbps" :
				 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
				  "1 Gbps" : "unknown speed")),
				((FLOW_RX && FLOW_TX) ? "RX/TX" :
				 (FLOW_RX ? "RX" :
				  (FLOW_TX ? "TX" : "None"))));

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	ixgbe_update_stats(adapter);
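	/*
	 * Rearm the timer roughly every two seconds; round_jiffies() aligns
	 * the expiry to a whole-second boundary so periodic timers across
	 * the system tend to fire together and the CPU can stay idle longer.
	 */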
	/* Reset the timer */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len = l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx |=
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
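/*
 * Like ixgbe_tso() above, the checksum offload path below programs a
 * single context descriptor describing the header layout; the data
 * descriptors that follow refer back to it, so the hardware can insert
 * the L4 checksum as the packet goes out on the wire.
 */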
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
			  struct ixgbe_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;

			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;

			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
						"partial checksum but proto=%x!\n",
						skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;
		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
			struct ixgbe_ring *tx_ring,
			struct sk_buff *skb, unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = pci_map_single(adapter->pdev,
						     skb->data + offset,
						     size, PCI_DMA_TODEVICE);
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = pci_map_page(adapter->pdev,
							   frag->page,
							   offset,
							   size,
							   PCI_DMA_TODEVICE);
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}
	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}
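/*
 * Note on sizing: a single transmit descriptor can carry at most
 * IXGBE_MAX_DATA_PER_TXD bytes, so ixgbe_tx_map() above splits the linear
 * data and each page fragment into min(len, IXGBE_MAX_DATA_PER_TXD) sized
 * chunks and returns the number of descriptors it consumed;
 * ixgbe_tx_queue() below walks exactly that many ring entries.
 */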
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
			   struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
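	/*
	 * txd_cmd is OR'd into the last descriptor only: EOP marks the end
	 * of the packet, RS asks the hardware to write back completion
	 * status for the descriptor, and IFCS has the MAC append the
	 * Ethernet FCS.
	 */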
	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
					 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
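/*
 * Queue-stop helpers. A transmitting CPU that finds the ring nearly full
 * stops the queue, but the clean-up path may free descriptors and wake
 * the queue concurrently; the smp_mb() below makes the stopped state
 * visible before the free-descriptor count is re-read, so the queue can
 * never be left stopped while room is actually available.
 */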
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct net_device *netdev,
			       struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int len = skb->len;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int tso;
	unsigned int mss = 0;
	int count = 0;
	unsigned int f;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	len -= skb->data_len;

	tx_ring = adapter->tx_ring;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	mss = skb_shinfo(skb)->gso_size;

	if (mss)
		count++;
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		count++;

	count += TXD_USE_COUNT(len);
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
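	/*
	 * TXD_USE_COUNT() rounds a buffer length up to the number of
	 * descriptors needed at IXGBE_MAX_DATA_PER_TXD bytes each, and the
	 * extra count++ above reserves a slot for the TSO/checksum context
	 * descriptor. Stopping the queue on this worst-case estimate means
	 * ixgbe_tx_map() can never run out of ring entries mid-packet.
	 */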
	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}
	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	first = tx_ring->next_to_use;
	tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
		       ixgbe_tx_map(adapter, tx_ring, skb, first),
		       skb->len, hdr_len);

	netdev->trans_start = jiffies;

	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	/* RAR 0 holds the primary unicast filter; IXGBE_RAH_AV marks it valid */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbe_intr(adapter->pdev->irq, netdev);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	static int cards_found;
	int i, err, pci_using_dac;
	u16 link_status, link_speed, link_width;
	u32 part_num;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}
	err = pci_request_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->open = &ixgbe_open;
	netdev->stop = &ixgbe_close;
	netdev->hard_start_xmit = &ixgbe_xmit_frame;
	netdev->get_stats = &ixgbe_get_stats;
	netdev->set_multicast_list = &ixgbe_set_multi;
	netdev->set_mac_address = &ixgbe_set_mac;
	netdev->change_mtu = &ixgbe_change_mtu;
	ixgbe_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgbe_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, ixgbe_clean, 64);
	netdev->vlan_rx_register = ixgbe_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgbe_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));

	err = ii->get_invariants(hw);
	if (err)
		goto err_hw_init;

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* make sure the EEPROM is good */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);

	/* initialize default flow control settings */
	hw->fc.original_type = ixgbe_fc_full;
	hw->fc.type = ixgbe_fc_full;
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;

	/* Interrupt Throttle Rate */
	adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
	adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
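	/*
	 * The EITR defaults are specified as microseconds between
	 * interrupts; dividing them into 1000000 converts each one to an
	 * interrupts-per-second rate (a 125 us interval, for example,
	 * would give 8000 ints/s).
	 */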
	/* print bus type/speed/width info */
	pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
	link_speed = link_status & IXGBE_PCI_LINK_SPEED;
	link_width = link_status & IXGBE_PCI_LINK_WIDTH;
	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
		 "%02x:%02x:%02x:%02x:%02x:%02x\n",
		 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
		  (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
		  "Unknown"),
		 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
		  (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
		  (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
		  (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
		  "Unknown"),
		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
	ixgbe_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		 hw->mac.type, hw->phy.type,
		 (part_num >> 8), (part_num & 0xff));

	if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
			 "this card is not sufficient for optimal "
			 "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
			 "PCI-Express slot is required.\n");
	}

	/* reset the hardware with the new settings */
	ixgbe_start_hw(hw);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	ixgbe_get_hw_control(adapter);

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
err_eeprom:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

	unregister_netdev(netdev);

	ixgbe_release_hw_control(adapter);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev->priv;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
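/*
 * PCI error recovery proceeds in stages: after error_detected() requests
 * a reset, the PCI core resets the slot and calls slot_reset() below to
 * reinitialise the device, then resume() to let traffic flow again.
 */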
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev->priv;

	if (pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev->priv;

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
	pci_unregister_driver(&ixgbe_driver);
}

module_exit(ixgbe_exit_module);