/*
 * Copyright (C) 2005 - 2010 ServerEngines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * 209 N. Fair Oaks Ave
 */

#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
    { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
    { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
    { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
    { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
    { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
    { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
    /* ... */
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
    /* ... */
};
static inline bool be_multi_rxq(struct be_adapter *adapter)
{
    return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
    struct be_dma_mem *mem = &q->dma_mem;
    if (mem->va)
        pci_free_consistent(adapter->pdev, mem->size,
            mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
        u16 len, u16 entry_size)
{
    struct be_dma_mem *mem = &q->dma_mem;

    memset(q, 0, sizeof(*q));
    q->len = len;
    q->entry_size = entry_size;
    mem->size = len * entry_size;
    mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
    if (!mem->va)
        return -1;
    memset(mem->va, 0, mem->size);
    return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
    u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
    u32 reg = ioread32(addr);
    u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

    if (adapter->eeh_err)
        return;

    if (!enabled && enable)
        reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
    else if (enabled && !enable)
        reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
    else
        return;

    iowrite32(reg, addr);
}
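/* Doorbell helpers: each routine below composes a 32-bit doorbell word
 * (the ring id in the low bits, posted/popped counts and arm/clear flags
 * shifted in above it) and writes it to the adapter's doorbell BAR to
 * notify hardware of queue activity.
 */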
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
    u32 val = 0;
    val |= qid & DB_RQ_RING_ID_MASK;
    val |= posted << DB_RQ_NUM_POSTED_SHIFT;

    wmb();
    iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
    u32 val = 0;
    val |= qid & DB_TXULP_RING_ID_MASK;
    val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

    wmb();
    iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
        bool arm, bool clear_int, u16 num_popped)
{
    u32 val = 0;
    val |= qid & DB_EQ_RING_ID_MASK;
    val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
            DB_EQ_RING_ID_EXT_MASK_SHIFT);

    if (adapter->eeh_err)
        return;

    if (arm)
        val |= 1 << DB_EQ_REARM_SHIFT;
    if (clear_int)
        val |= 1 << DB_EQ_CLR_SHIFT;
    val |= 1 << DB_EQ_EVNT_SHIFT;
    val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
    iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
    u32 val = 0;
    val |= qid & DB_CQ_RING_ID_MASK;
    val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
            DB_CQ_RING_ID_EXT_MASK_SHIFT);

    if (adapter->eeh_err)
        return;

    if (arm)
        val |= 1 << DB_CQ_REARM_SHIFT;
    val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
    iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    struct sockaddr *addr = p;
    int status = 0;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    /* MAC addr configuration will be done in hardware for VFs
     * by their corresponding PFs. Just copy to netdev addr here
     */
    if (!be_physfn(adapter))
        goto netdev_addr;

    status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
    if (status)
        return status;

    status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
            adapter->if_handle, &adapter->pmac_id);
netdev_addr:
    if (!status)
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

    return status;
}
void netdev_stats_update(struct be_adapter *adapter)
{
    struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
    struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
    struct be_port_rxf_stats *port_stats =
            &rxf_stats->port[adapter->port_num];
    struct net_device_stats *dev_stats = &adapter->netdev->stats;
    struct be_erx_stats *erx_stats = &hw_stats->erx;
    struct be_rx_obj *rxo;
    int i;

    memset(dev_stats, 0, sizeof(*dev_stats));
    for_all_rx_queues(adapter, rxo, i) {
        dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
        dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
        dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
        /* no space in linux buffers: best possible approximation */
        dev_stats->rx_dropped +=
            erx_stats->rx_drops_no_fragments[rxo->q.id];
    }

    dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
    dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

    /* bad pkts received */
    dev_stats->rx_errors = port_stats->rx_crc_errors +
        port_stats->rx_alignment_symbol_errors +
        port_stats->rx_in_range_errors +
        port_stats->rx_out_range_errors +
        port_stats->rx_frame_too_long +
        port_stats->rx_dropped_too_small +
        port_stats->rx_dropped_too_short +
        port_stats->rx_dropped_header_too_small +
        port_stats->rx_dropped_tcp_length +
        port_stats->rx_dropped_runt +
        port_stats->rx_tcp_checksum_errs +
        port_stats->rx_ip_checksum_errs +
        port_stats->rx_udp_checksum_errs;

    /* detailed rx errors */
    dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
        port_stats->rx_out_range_errors +
        port_stats->rx_frame_too_long;

    dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

    /* frame alignment errors */
    dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

    /* receiver fifo overrun */
    /* drops_no_pbuf is not per i/f, it's per BE card */
    dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
        port_stats->rx_input_fifo_overflow +
        rxf_stats->rx_drops_no_pbuf;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
    struct net_device *netdev = adapter->netdev;

    /* If link came up or went down */
    if (adapter->link_up != link_up) {
        adapter->link_speed = -1;
        if (link_up) {
            netif_start_queue(netdev);
            netif_carrier_on(netdev);
            printk(KERN_INFO "%s: Link up\n", netdev->name);
        } else {
            netif_stop_queue(netdev);
            netif_carrier_off(netdev);
            printk(KERN_INFO "%s: Link down\n", netdev->name);
        }
        adapter->link_up = link_up;
    }
}
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
    struct be_eq_obj *rx_eq = &rxo->rx_eq;
    struct be_rx_stats *stats = &rxo->stats;
    ulong now = jiffies;
    u32 eqd;

    if (!rx_eq->enable_aic)
        return;

    /* Wrapped around */
    if (time_before(now, stats->rx_fps_jiffies)) {
        stats->rx_fps_jiffies = now;
        return;
    }

    /* Update once a second */
    if ((now - stats->rx_fps_jiffies) < HZ)
        return;

    stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
            ((now - stats->rx_fps_jiffies) / HZ);

    stats->rx_fps_jiffies = now;
    stats->prev_rx_frags = stats->rx_frags;
    eqd = stats->rx_fps / 110000;
    if (eqd > rx_eq->max_eqd)
        eqd = rx_eq->max_eqd;
    if (eqd < rx_eq->min_eqd)
        eqd = rx_eq->min_eqd;
    if (eqd != rx_eq->cur_eqd)
        be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

    rx_eq->cur_eqd = eqd;
}
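/* Convert a byte count measured over 'ticks' jiffies into an approximate
 * rate: bytes/interval -> bytes/sec -> bits/sec (<< 3) -> Mbps (/ 1000000).
 * do_div() is used because native 64-bit division is not available on all
 * 32-bit targets.
 */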
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
    u64 rate = bytes;

    do_div(rate, ticks / HZ);
    rate <<= 3;            /* bytes/sec -> bits/sec */
    do_div(rate, 1000000ul);    /* MB/Sec */

    return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
    struct be_tx_stats *stats = tx_stats(adapter);
    ulong now = jiffies;

    /* Wrapped around? */
    if (time_before(now, stats->be_tx_jiffies)) {
        stats->be_tx_jiffies = now;
        return;
    }

    /* Update tx rate once in two seconds */
    if ((now - stats->be_tx_jiffies) > 2 * HZ) {
        stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                          - stats->be_tx_bytes_prev,
                         now - stats->be_tx_jiffies);
        stats->be_tx_jiffies = now;
        stats->be_tx_bytes_prev = stats->be_tx_bytes;
    }
}

static void be_tx_stats_update(struct be_adapter *adapter,
            u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
    struct be_tx_stats *stats = tx_stats(adapter);

    stats->be_tx_wrbs += wrb_cnt;
    stats->be_tx_bytes += copied;
    stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
    if (stopped)
        stats->be_tx_stops++;
}
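/* A transmit request occupies one header WRB plus one WRB per DMA fragment
 * (the linear part, if any, and each page frag). On non-Lancer chips the
 * total WRB count per request must be even, so a dummy WRB is appended
 * when needed, as the routine below works out.
 */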
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
            bool *dummy)
{
    int cnt = (skb->len > skb->data_len);

    cnt += skb_shinfo(skb)->nr_frags;

    /* to account for hdr wrb */
    cnt++;
    if (lancer_chip(adapter) || !(cnt & 1)) {
        *dummy = false;
    } else {
        /* add a dummy to make it an even num */
        cnt++;
        *dummy = true;
    }
    BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
    return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
    wrb->frag_pa_hi = upper_32_bits(addr);
    wrb->frag_pa_lo = addr & 0xFFFFFFFF;
    wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
        struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
    u8 vlan_prio;
    u16 vlan_tag;

    memset(hdr, 0, sizeof(*hdr));

    AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

    if (skb_is_gso(skb)) {
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
            hdr, skb_shinfo(skb)->gso_size);
        if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
            AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        if (lancer_chip(adapter) && adapter->sli_family ==
                        LANCER_A0_SLI_FAMILY) {
            AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
            if (is_tcp_pkt(skb))
                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                tcpcs, hdr, 1);
            else if (is_udp_pkt(skb))
                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                udpcs, hdr, 1);
        }
    } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
        if (is_tcp_pkt(skb))
            AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
        else if (is_udp_pkt(skb))
            AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
    }

    if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
            vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                    adapter->recommended_prio;
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
    }

    AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
    AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
    AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
    AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
        bool unmap_single)
{
    u64 dma;

    be_dws_le_to_cpu(wrb, sizeof(*wrb));

    dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
    if (wrb->frag_len) {
        if (unmap_single)
            pci_unmap_single(pdev, dma, wrb->frag_len,
                    PCI_DMA_TODEVICE);
        else
            pci_unmap_page(pdev, dma, wrb->frag_len,
                    PCI_DMA_TODEVICE);
    }
}
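/* make_tx_wrbs() maps the skb (head via pci_map_single, page frags via
 * pci_map_page) and fills one data WRB per mapping, reserving the first
 * slot for the header WRB, which is filled last once the copied length is
 * known. On a DMA mapping error the queue head is rewound and all mappings
 * made so far are undone.
 */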
static int make_tx_wrbs(struct be_adapter *adapter,
        struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
    dma_addr_t busaddr;
    int i, copied = 0;
    struct pci_dev *pdev = adapter->pdev;
    struct sk_buff *first_skb = skb;
    struct be_queue_info *txq = &adapter->tx_obj.q;
    struct be_eth_wrb *wrb;
    struct be_eth_hdr_wrb *hdr;
    bool map_single = false;
    u16 map_head;

    hdr = queue_head_node(txq);
    queue_head_inc(txq);
    map_head = txq->head;

    if (skb->len > skb->data_len) {
        int len = skb_headlen(skb);
        busaddr = pci_map_single(pdev, skb->data, len,
                PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(pdev, busaddr))
            goto dma_err;
        map_single = true;
        wrb = queue_head_node(txq);
        wrb_fill(wrb, busaddr, len);
        be_dws_cpu_to_le(wrb, sizeof(*wrb));
        queue_head_inc(txq);
        copied += len;
    }

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        struct skb_frag_struct *frag =
            &skb_shinfo(skb)->frags[i];
        busaddr = pci_map_page(pdev, frag->page,
                frag->page_offset,
                frag->size, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(pdev, busaddr))
            goto dma_err;
        wrb = queue_head_node(txq);
        wrb_fill(wrb, busaddr, frag->size);
        be_dws_cpu_to_le(wrb, sizeof(*wrb));
        queue_head_inc(txq);
        copied += frag->size;
    }

    if (dummy_wrb) {
        wrb = queue_head_node(txq);
        wrb_fill(wrb, 0, 0);
        be_dws_cpu_to_le(wrb, sizeof(*wrb));
        queue_head_inc(txq);
    }

    wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
    be_dws_cpu_to_le(hdr, sizeof(*hdr));

    return copied;
dma_err:
    txq->head = map_head;
    while (copied) {
        wrb = queue_head_node(txq);
        unmap_tx_frag(pdev, wrb, map_single);
        map_single = false;
        copied -= wrb->frag_len;
        queue_head_inc(txq);
    }
    return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
            struct net_device *netdev)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    struct be_tx_obj *tx_obj = &adapter->tx_obj;
    struct be_queue_info *txq = &tx_obj->q;
    u32 wrb_cnt = 0, copied = 0;
    u32 start = txq->head;
    bool dummy_wrb, stopped = false;

    wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

    copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
    if (copied) {
        /* record the sent skb in the sent_skb table */
        BUG_ON(tx_obj->sent_skb_list[start]);
        tx_obj->sent_skb_list[start] = skb;

        /* Ensure txq has space for the next skb; Else stop the queue
         * *BEFORE* ringing the tx doorbell, so that we serialize the
         * tx compls of the current transmit which'll wake up the queue
         */
        atomic_add(wrb_cnt, &txq->used);
        if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                txq->len) {
            netif_stop_queue(netdev);
            stopped = true;
        }

        be_txq_notify(adapter, txq->id, wrb_cnt);

        be_tx_stats_update(adapter, wrb_cnt, copied,
                skb_shinfo(skb)->gso_segs, stopped);
    } else {
        txq->head = start;
        dev_kfree_skb_any(skb);
    }
    return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    if (new_mtu < BE_MIN_MTU ||
            new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                    (ETH_HLEN + ETH_FCS_LEN))) {
        dev_info(&adapter->pdev->dev,
            "MTU must be between %d and %d bytes\n",
            BE_MIN_MTU,
            (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
        return -EINVAL;
    }
    dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
            netdev->mtu, new_mtu);
    netdev->mtu = new_mtu;
    return 0;
}
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
    u16 vtag[BE_NUM_VLANS_SUPPORTED];
    u16 ntags = 0, i;
    int status = 0;
    u32 if_handle;

    if (vf) {
        if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
        vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
        status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
    }

    if (adapter->vlans_added <= adapter->max_vlans) {
        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++) {
            if (adapter->vlan_tag[i]) {
                vtag[ntags] = cpu_to_le16(i);
                ntags++;
            }
        }
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                    vtag, ntags, 1, 0);
    } else {
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                    NULL, 0, 1, 1);
    }

    return status;
}
*netdev
, struct vlan_group
*grp
)
671 struct be_adapter
*adapter
= netdev_priv(netdev
);
673 adapter
->vlan_grp
= grp
;
676 static void be_vlan_add_vid(struct net_device
*netdev
, u16 vid
)
678 struct be_adapter
*adapter
= netdev_priv(netdev
);
680 adapter
->vlans_added
++;
681 if (!be_physfn(adapter
))
684 adapter
->vlan_tag
[vid
] = 1;
685 if (adapter
->vlans_added
<= (adapter
->max_vlans
+ 1))
686 be_vid_config(adapter
, false, 0);
689 static void be_vlan_rem_vid(struct net_device
*netdev
, u16 vid
)
691 struct be_adapter
*adapter
= netdev_priv(netdev
);
693 adapter
->vlans_added
--;
694 vlan_group_set_device(adapter
->vlan_grp
, vid
, NULL
);
696 if (!be_physfn(adapter
))
699 adapter
->vlan_tag
[vid
] = 0;
700 if (adapter
->vlans_added
<= adapter
->max_vlans
)
701 be_vid_config(adapter
, false, 0);
static void be_set_multicast_list(struct net_device *netdev)
{
    struct be_adapter *adapter = netdev_priv(netdev);

    if (netdev->flags & IFF_PROMISC) {
        be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
        adapter->promiscuous = true;
        goto done;
    }

    /* BE was previously in promiscuous mode; disable it */
    if (adapter->promiscuous) {
        adapter->promiscuous = false;
        be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
    }

    /* Enable multicast promisc if num configured exceeds what we support */
    if (netdev->flags & IFF_ALLMULTI ||
        netdev_mc_count(netdev) > BE_MAX_MC) {
        be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                &adapter->mc_cmd_mem);
        goto done;
    }

    be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
            &adapter->mc_cmd_mem);
done:
    return;
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    int status;

    if (!adapter->sriov_enabled)
        return -EPERM;

    if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
        return -EINVAL;

    if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
        status = be_cmd_pmac_del(adapter,
                    adapter->vf_cfg[vf].vf_if_handle,
                    adapter->vf_cfg[vf].vf_pmac_id);

    status = be_cmd_pmac_add(adapter, mac,
                adapter->vf_cfg[vf].vf_if_handle,
                &adapter->vf_cfg[vf].vf_pmac_id);

    if (status)
        dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                mac, vf);
    else
        memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

    return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
            struct ifla_vf_info *vi)
{
    struct be_adapter *adapter = netdev_priv(netdev);

    if (!adapter->sriov_enabled)
        return -EPERM;

    if (vf >= num_vfs)
        return -EINVAL;

    vi->vf = vf;
    vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
    vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
    vi->qos = 0;
    memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

    return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
            int vf, u16 vlan, u8 qos)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    int status = 0;

    if (!adapter->sriov_enabled)
        return -EPERM;

    if ((vf >= num_vfs) || (vlan > 4095))
        return -EINVAL;

    if (vlan) {
        adapter->vf_cfg[vf].vf_vlan_tag = vlan;
        adapter->vlans_added++;
    } else {
        adapter->vf_cfg[vf].vf_vlan_tag = 0;
        adapter->vlans_added--;
    }

    status = be_vid_config(adapter, true, vf);

    if (status)
        dev_info(&adapter->pdev->dev,
                "VLAN %d config on VF %d failed\n", vlan, vf);
    return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
            int vf, int rate)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    int status = 0;

    if (!adapter->sriov_enabled)
        return -EPERM;

    if ((vf >= num_vfs) || (rate < 0))
        return -EINVAL;

    adapter->vf_cfg[vf].vf_tx_rate = rate;
    status = be_cmd_set_qos(adapter, rate / 10, vf);

    if (status)
        dev_info(&adapter->pdev->dev,
                "tx rate %d on VF %d failed\n", rate, vf);
    return status;
}
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
    struct be_rx_stats *stats = &rxo->stats;
    ulong now = jiffies;

    /* Wrapped around */
    if (time_before(now, stats->rx_jiffies)) {
        stats->rx_jiffies = now;
        return;
    }

    /* Update the rate once in two seconds */
    if ((now - stats->rx_jiffies) < 2 * HZ)
        return;

    stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                now - stats->rx_jiffies);
    stats->rx_jiffies = now;
    stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
        u32 pktsize, u16 numfrags, u8 pkt_type)
{
    struct be_rx_stats *stats = &rxo->stats;

    stats->rx_frags += numfrags;
    stats->rx_bytes += pktsize;
    stats->rx_pkts++;
    if (pkt_type == BE_MULTICAST_PACKET)
        stats->rx_mcast_pkts++;
}
static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
    u8 l4_cksm, ipv6, ipcksm;

    l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
    ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
    ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

    /* Ignore ipcksm for ipv6 pkts */
    return l4_cksm && (ipcksm || ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
        struct be_rx_obj *rxo,
        u16 frag_idx)
{
    struct be_rx_page_info *rx_page_info;
    struct be_queue_info *rxq = &rxo->q;

    rx_page_info = &rxo->page_info_tbl[frag_idx];
    BUG_ON(!rx_page_info->page);

    if (rx_page_info->last_page_user) {
        pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
            adapter->big_page_size, PCI_DMA_FROMDEVICE);
        rx_page_info->last_page_user = false;
    }

    atomic_dec(&rxq->used);
    return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
        struct be_rx_obj *rxo,
        struct be_eth_rx_compl *rxcp)
{
    struct be_queue_info *rxq = &rxo->q;
    struct be_rx_page_info *page_info;
    u16 rxq_idx, i, num_rcvd;

    rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
    num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

    /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
    if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
        rxo->last_frag_index = rxq_idx;

        for (i = 0; i < num_rcvd; i++) {
            page_info = get_rx_page_info(adapter, rxo, rxq_idx);
            put_page(page_info->page);
            memset(page_info, 0, sizeof(*page_info));
            index_inc(&rxq_idx, rxq->len);
        }
    }
}
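/* Rx fragments that come from the same physical page are coalesced into a
 * single skb fragment slot below; the first BE_HDR_LEN bytes are copied
 * into the skb's linear area so protocol headers are directly accessible.
 */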
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
        u16 num_rcvd)
{
    struct be_queue_info *rxq = &rxo->q;
    struct be_rx_page_info *page_info;
    u16 rxq_idx, i, j;
    u32 pktsize, hdr_len, curr_frag_len, size;
    u8 *start;
    u8 pkt_type;

    rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
    pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
    pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

    page_info = get_rx_page_info(adapter, rxo, rxq_idx);

    start = page_address(page_info->page) + page_info->page_offset;
    prefetch(start);

    /* Copy data in the first descriptor of this completion */
    curr_frag_len = min(pktsize, rx_frag_size);

    /* Copy the header portion into skb_data */
    hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
    memcpy(skb->data, start, hdr_len);
    skb->len = curr_frag_len;
    if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
        /* Complete packet has now been moved to data */
        put_page(page_info->page);
        skb->data_len = 0;
        skb->tail += curr_frag_len;
    } else {
        skb_shinfo(skb)->nr_frags = 1;
        skb_shinfo(skb)->frags[0].page = page_info->page;
        skb_shinfo(skb)->frags[0].page_offset =
                    page_info->page_offset + hdr_len;
        skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
        skb->data_len = curr_frag_len - hdr_len;
        skb->tail += hdr_len;
    }
    page_info->page = NULL;

    if (pktsize <= rx_frag_size) {
        BUG_ON(num_rcvd != 1);
        goto done;
    }

    /* More frags present for this completion */
    size = pktsize;
    for (i = 1, j = 0; i < num_rcvd; i++) {
        size -= curr_frag_len;
        index_inc(&rxq_idx, rxq->len);
        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        curr_frag_len = min(size, rx_frag_size);

        /* Coalesce all frags from the same physical page in one slot */
        if (page_info->page_offset == 0) {
            /* Fresh page */
            j++;
            skb_shinfo(skb)->frags[j].page = page_info->page;
            skb_shinfo(skb)->frags[j].page_offset =
                            page_info->page_offset;
            skb_shinfo(skb)->frags[j].size = 0;
            skb_shinfo(skb)->nr_frags++;
        } else {
            put_page(page_info->page);
        }

        skb_shinfo(skb)->frags[j].size += curr_frag_len;
        skb->len += curr_frag_len;
        skb->data_len += curr_frag_len;

        page_info->page = NULL;
    }
    BUG_ON(j > MAX_SKB_FRAGS);

done:
    be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
            struct be_rx_obj *rxo,
            struct be_eth_rx_compl *rxcp)
{
    struct sk_buff *skb;
    u32 vlanf, vid;
    u16 num_rcvd;
    u8 vtm;

    num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

    skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
    if (unlikely(!skb)) {
        if (net_ratelimit())
            dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
        be_rx_compl_discard(adapter, rxo, rxcp);
        return;
    }

    skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

    if (likely(adapter->rx_csum && csum_passed(rxcp)))
        skb->ip_summed = CHECKSUM_UNNECESSARY;
    else
        skb_checksum_none_assert(skb);

    skb->truesize = skb->len + sizeof(struct sk_buff);
    skb->protocol = eth_type_trans(skb, adapter->netdev);

    vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
    vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

    /* vlanf could be wrongly set in some cards.
     * ignore if vtm is not set */
    if ((adapter->function_mode & 0x400) && !vtm)
        vlanf = 0;

    if (unlikely(vlanf)) {
        if (!adapter->vlan_grp || adapter->vlans_added == 0) {
            kfree_skb(skb);
            return;
        }
        vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
        if (!lancer_chip(adapter))
            vid = swab16(vid);
        vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
    } else {
        netif_receive_skb(skb);
    }
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
        struct be_rx_obj *rxo,
        struct be_eth_rx_compl *rxcp)
{
    struct be_rx_page_info *page_info;
    struct sk_buff *skb = NULL;
    struct be_queue_info *rxq = &rxo->q;
    struct be_eq_obj *eq_obj = &rxo->rx_eq;
    u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
    u16 i, rxq_idx = 0, vid, j;
    u8 vtm;
    u8 pkt_type;

    num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
    pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
    vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
    rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
    vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
    pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

    /* vlanf could be wrongly set in some cards.
     * ignore if vtm is not set */
    if ((adapter->function_mode & 0x400) && !vtm)
        vlanf = 0;

    skb = napi_get_frags(&eq_obj->napi);
    if (!skb) {
        be_rx_compl_discard(adapter, rxo, rxcp);
        return;
    }

    remaining = pkt_size;
    for (i = 0, j = -1; i < num_rcvd; i++) {
        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        curr_frag_len = min(remaining, rx_frag_size);

        /* Coalesce all frags from the same physical page in one slot */
        if (i == 0 || page_info->page_offset == 0) {
            /* First frag or Fresh page */
            j++;
            skb_shinfo(skb)->frags[j].page = page_info->page;
            skb_shinfo(skb)->frags[j].page_offset =
                            page_info->page_offset;
            skb_shinfo(skb)->frags[j].size = 0;
        } else {
            put_page(page_info->page);
        }
        skb_shinfo(skb)->frags[j].size += curr_frag_len;

        remaining -= curr_frag_len;
        index_inc(&rxq_idx, rxq->len);
        memset(page_info, 0, sizeof(*page_info));
    }
    BUG_ON(j > MAX_SKB_FRAGS);

    skb_shinfo(skb)->nr_frags = j + 1;
    skb->len = pkt_size;
    skb->data_len = pkt_size;
    skb->truesize += pkt_size;
    skb->ip_summed = CHECKSUM_UNNECESSARY;

    if (likely(!vlanf)) {
        napi_gro_frags(&eq_obj->napi);
    } else {
        vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
        if (!lancer_chip(adapter))
            vid = swab16(vid);

        if (!adapter->vlan_grp || adapter->vlans_added == 0)
            return;

        vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
    }

    be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
    struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

    if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
        return NULL;

    rmb();
    be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

    queue_tail_inc(&rxo->cq);
    return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
    rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
    gfp_t alloc_flags = GFP_ATOMIC;
    u32 order = get_order(size);

    if (order > 0)
        alloc_flags |= __GFP_COMP;
    return alloc_pages(alloc_flags, order);
}
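/* Each big page (a compound page of order get_order(rx_frag_size)) is
 * carved into rx_frag_size-sized receive fragments. The page_info entry
 * that hands out the last fragment of a page is marked last_page_user, so
 * the page is DMA-unmapped only once all of its fragments are consumed.
 */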
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
    struct be_adapter *adapter = rxo->adapter;
    struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
    struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
    struct be_queue_info *rxq = &rxo->q;
    struct page *pagep = NULL;
    struct be_eth_rx_d *rxd;
    u64 page_dmaaddr = 0, frag_dmaaddr;
    u32 posted, page_offset = 0;

    page_info = &rxo->page_info_tbl[rxq->head];
    for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
        if (!pagep) {
            pagep = be_alloc_pages(adapter->big_page_size);
            if (unlikely(!pagep)) {
                rxo->stats.rx_post_fail++;
                break;
            }
            page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                        adapter->big_page_size,
                        PCI_DMA_FROMDEVICE);
            page_info->page_offset = 0;
        } else {
            get_page(pagep);
            page_info->page_offset = page_offset + rx_frag_size;
        }
        page_offset = page_info->page_offset;
        page_info->page = pagep;
        dma_unmap_addr_set(page_info, bus, page_dmaaddr);
        frag_dmaaddr = page_dmaaddr + page_info->page_offset;

        rxd = queue_head_node(rxq);
        rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
        rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

        /* Any space left in the current big page for another frag? */
        if ((page_offset + rx_frag_size + rx_frag_size) >
                    adapter->big_page_size) {
            pagep = NULL;
            page_info->last_page_user = true;
        }

        prev_page_info = page_info;
        queue_head_inc(rxq);
        page_info = &page_info_tbl[rxq->head];
    }
    if (pagep)
        prev_page_info->last_page_user = true;

    if (posted) {
        atomic_add(posted, &rxq->used);
        be_rxq_notify(adapter, rxq->id, posted);
    } else if (atomic_read(&rxq->used) == 0) {
        /* Let be_worker replenish when memory is available */
        rxo->rx_post_starved = true;
    }
}
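/* A tx completion carries the index of the last WRB of the request. The
 * routines below pop the completion, then walk the txq from its tail
 * (header WRB first) up to that index, unmapping each data fragment.
 */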
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
    struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

    if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
        return NULL;

    rmb();
    be_dws_le_to_cpu(txcp, sizeof(*txcp));

    txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

    queue_tail_inc(tx_cq);
    return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
    struct be_queue_info *txq = &adapter->tx_obj.q;
    struct be_eth_wrb *wrb;
    struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
    struct sk_buff *sent_skb;
    u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
    bool unmap_skb_hdr = true;

    sent_skb = sent_skbs[txq->tail];
    BUG_ON(!sent_skb);
    sent_skbs[txq->tail] = NULL;

    /* skip header wrb */
    queue_tail_inc(txq);

    do {
        cur_index = txq->tail;
        wrb = queue_tail_node(txq);
        unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
                    skb_headlen(sent_skb)));
        unmap_skb_hdr = false;

        num_wrbs++;
        queue_tail_inc(txq);
    } while (cur_index != last_index);

    atomic_sub(num_wrbs, &txq->used);

    kfree_skb(sent_skb);
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
    struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

    if (!eqe->evt)
        return NULL;

    rmb();
    eqe->evt = le32_to_cpu(eqe->evt);
    queue_tail_inc(&eq_obj->q);
    return eqe;
}

static int event_handle(struct be_adapter *adapter,
            struct be_eq_obj *eq_obj)
{
    struct be_eq_entry *eqe;
    u16 num = 0;

    while ((eqe = event_get(eq_obj)) != NULL) {
        eqe->evt = 0;
        num++;
    }

    /* Deal with any spurious interrupts that come
     * without events
     */
    be_eq_notify(adapter, eq_obj->q.id, true, true, num);
    if (num)
        napi_schedule(&eq_obj->napi);

    return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
            struct be_eq_obj *eq_obj)
{
    struct be_eq_entry *eqe;
    u16 num = 0;

    while ((eqe = event_get(eq_obj)) != NULL) {
        eqe->evt = 0;
        num++;
    }

    if (num)
        be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
    struct be_rx_page_info *page_info;
    struct be_queue_info *rxq = &rxo->q;
    struct be_queue_info *rx_cq = &rxo->cq;
    struct be_eth_rx_compl *rxcp;
    u16 tail;

    /* First cleanup pending rx completions */
    while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
        be_rx_compl_discard(adapter, rxo, rxcp);
        be_rx_compl_reset(rxcp);
        be_cq_notify(adapter, rx_cq->id, false, 1);
    }

    /* Then free posted rx buffers that were not used */
    tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
    for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
        page_info = get_rx_page_info(adapter, rxo, tail);
        put_page(page_info->page);
        memset(page_info, 0, sizeof(*page_info));
    }
    BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
    struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
    struct be_queue_info *txq = &adapter->tx_obj.q;
    struct be_eth_tx_compl *txcp;
    u16 end_idx, cmpl = 0, timeo = 0;
    struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
    struct sk_buff *sent_skb;
    bool dummy_wrb;

    /* Wait for a max of 200ms for all the tx-completions to arrive. */
    do {
        while ((txcp = be_tx_compl_get(tx_cq))) {
            end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                    wrb_index, txcp);
            be_tx_compl_process(adapter, end_idx);
            cmpl++;
        }
        if (cmpl) {
            be_cq_notify(adapter, tx_cq->id, false, cmpl);
            cmpl = 0;
        }

        if (atomic_read(&txq->used) == 0 || ++timeo > 200)
            break;

        mdelay(1);
    } while (true);

    if (atomic_read(&txq->used))
        dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
            atomic_read(&txq->used));

    /* free posted tx for which compls will never arrive */
    while (atomic_read(&txq->used)) {
        sent_skb = sent_skbs[txq->tail];
        end_idx = txq->tail;
        index_adv(&end_idx,
            wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
            txq->len);
        be_tx_compl_process(adapter, end_idx);
    }
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
    struct be_queue_info *q;

    q = &adapter->mcc_obj.q;
    if (q->created)
        be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
    be_queue_free(adapter, q);

    q = &adapter->mcc_obj.cq;
    if (q->created)
        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
    be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
    struct be_queue_info *q, *cq;

    /* Alloc MCC compl queue */
    cq = &adapter->mcc_obj.cq;
    if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
            sizeof(struct be_mcc_compl)))
        goto err;

    /* Ask BE to create MCC compl queue; share TX's eq */
    if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
        goto mcc_cq_free;

    /* Alloc MCC queue */
    q = &adapter->mcc_obj.q;
    if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
        goto mcc_cq_destroy;

    /* Ask BE to create MCC queue */
    if (be_cmd_mccq_create(adapter, q, cq))
        goto mcc_q_free;

    return 0;

mcc_q_free:
    be_queue_free(adapter, q);
mcc_cq_destroy:
    be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
    be_queue_free(adapter, cq);
err:
    return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
    struct be_queue_info *q;

    q = &adapter->tx_obj.q;
    if (q->created)
        be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
    be_queue_free(adapter, q);

    q = &adapter->tx_obj.cq;
    if (q->created)
        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
    be_queue_free(adapter, q);

    /* Clear any residual events */
    be_eq_clean(adapter, &adapter->tx_eq);

    q = &adapter->tx_eq.q;
    if (q->created)
        be_cmd_q_destroy(adapter, q, QTYPE_EQ);
    be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
    struct be_queue_info *eq, *q, *cq;

    adapter->tx_eq.max_eqd = 0;
    adapter->tx_eq.min_eqd = 0;
    adapter->tx_eq.cur_eqd = 96;
    adapter->tx_eq.enable_aic = false;
    /* Alloc Tx Event queue */
    eq = &adapter->tx_eq.q;
    if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
        return -1;

    /* Ask BE to create Tx Event queue */
    if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
        goto tx_eq_free;

    adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

    /* Alloc TX eth compl queue */
    cq = &adapter->tx_obj.cq;
    if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
            sizeof(struct be_eth_tx_compl)))
        goto tx_eq_destroy;

    /* Ask BE to create Tx eth compl queue */
    if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
        goto tx_cq_free;

    /* Alloc TX eth queue */
    q = &adapter->tx_obj.q;
    if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
        goto tx_cq_destroy;

    /* Ask BE to create Tx eth queue */
    if (be_cmd_txq_create(adapter, q, cq))
        goto tx_q_free;
    return 0;

tx_q_free:
    be_queue_free(adapter, q);
tx_cq_destroy:
    be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
    be_queue_free(adapter, cq);
tx_eq_destroy:
    be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
    be_queue_free(adapter, eq);
    return -1;
}
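/* Each rx object owns a private EQ/CQ/RXQ triple. Queue creation below
 * walks all rx objects: the first queue is the default (non-RSS) queue,
 * and any additional queues are created with RSS enabled and then
 * programmed into the RSS indirection table.
 */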
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
    struct be_queue_info *q;
    struct be_rx_obj *rxo;
    int i;

    for_all_rx_queues(adapter, rxo, i) {
        q = &rxo->q;
        if (q->created) {
            be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
            /* After the rxq is invalidated, wait for a grace time
             * of 1ms for all dma to end and the flush compl to
             * arrive
             */
            mdelay(1);
            be_rx_q_clean(adapter, rxo);
        }
        be_queue_free(adapter, q);

        q = &rxo->cq;
        if (q->created)
            be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        q = &rxo->rx_eq.q;
        if (q->created) {
            be_eq_clean(adapter, &rxo->rx_eq);
            be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        }
        be_queue_free(adapter, q);
    }
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
    struct be_queue_info *eq, *q, *cq;
    struct be_rx_obj *rxo;
    int rc, i;

    adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
    for_all_rx_queues(adapter, rxo, i) {
        rxo->adapter = adapter;
        /* Init last_frag_index so that the frag index in the first
         * completion will never match */
        rxo->last_frag_index = 0xffff;
        rxo->rx_eq.max_eqd = BE_MAX_EQD;
        rxo->rx_eq.enable_aic = true;

        /* EQ */
        eq = &rxo->rx_eq.q;
        rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                    sizeof(struct be_eq_entry));
        if (rc)
            goto err;

        rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
        if (rc)
            goto err;

        rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

        /* CQ */
        cq = &rxo->cq;
        rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                sizeof(struct be_eth_rx_compl));
        if (rc)
            goto err;

        rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
        if (rc)
            goto err;

        /* Rx Q */
        q = &rxo->q;
        rc = be_queue_alloc(adapter, q, RX_Q_LEN,
                sizeof(struct be_eth_rx_d));
        if (rc)
            goto err;

        rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
            BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
            (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
        if (rc)
            goto err;
    }

    if (be_multi_rxq(adapter)) {
        u8 rsstable[MAX_RSS_QS];

        for_all_rss_queues(adapter, rxo, i)
            rsstable[i] = rxo->rss_id;

        rc = be_cmd_rss_config(adapter, rsstable,
            adapter->num_rx_qs - 1);
        if (rc)
            goto err;
    }

    return 0;
err:
    be_rx_queues_destroy(adapter);
    return -1;
}
*eq_obj
)
1641 struct be_eq_entry
*eqe
= queue_tail_node(&eq_obj
->q
);
1648 static irqreturn_t
be_intx(int irq
, void *dev
)
1650 struct be_adapter
*adapter
= dev
;
1651 struct be_rx_obj
*rxo
;
1652 int isr
, i
, tx
= 0 , rx
= 0;
1654 if (lancer_chip(adapter
)) {
1655 if (event_peek(&adapter
->tx_eq
))
1656 tx
= event_handle(adapter
, &adapter
->tx_eq
);
1657 for_all_rx_queues(adapter
, rxo
, i
) {
1658 if (event_peek(&rxo
->rx_eq
))
1659 rx
|= event_handle(adapter
, &rxo
->rx_eq
);
1666 isr
= ioread32(adapter
->csr
+ CEV_ISR0_OFFSET
+
1667 (adapter
->tx_eq
.q
.id
/ 8) * CEV_ISR_SIZE
);
1671 if ((1 << adapter
->tx_eq
.msix_vec_idx
& isr
))
1672 event_handle(adapter
, &adapter
->tx_eq
);
1674 for_all_rx_queues(adapter
, rxo
, i
) {
1675 if ((1 << rxo
->rx_eq
.msix_vec_idx
& isr
))
1676 event_handle(adapter
, &rxo
->rx_eq
);
1683 static irqreturn_t
be_msix_rx(int irq
, void *dev
)
1685 struct be_rx_obj
*rxo
= dev
;
1686 struct be_adapter
*adapter
= rxo
->adapter
;
1688 event_handle(adapter
, &rxo
->rx_eq
);
1693 static irqreturn_t
be_msix_tx_mcc(int irq
, void *dev
)
1695 struct be_adapter
*adapter
= dev
;
1697 event_handle(adapter
, &adapter
->tx_eq
);
1702 static inline bool do_gro(struct be_rx_obj
*rxo
,
1703 struct be_eth_rx_compl
*rxcp
, u8 err
)
1705 int tcp_frame
= AMAP_GET_BITS(struct amap_eth_rx_compl
, tcpf
, rxcp
);
1708 rxo
->stats
.rxcp_err
++;
1710 return (tcp_frame
&& !err
) ? true : false;
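/* NAPI rx poll: consumes up to 'budget' completions, refills the rx queue
 * when it drops below the refill watermark, and re-arms the CQ only when
 * the budget was not exhausted.
 */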
static int be_poll_rx(struct napi_struct *napi, int budget)
{
    struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
    struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
    struct be_adapter *adapter = rxo->adapter;
    struct be_queue_info *rx_cq = &rxo->cq;
    struct be_eth_rx_compl *rxcp;
    u32 work_done;
    u16 frag_index, num_rcvd;
    u8 err;

    rxo->stats.rx_polls++;
    for (work_done = 0; work_done < budget; work_done++) {
        rxcp = be_rx_compl_get(rxo);
        if (!rxcp)
            break;

        err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
        frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
                                rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
                                rxcp);

        /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
        if (likely(frag_index != rxo->last_frag_index &&
                num_rcvd != 0)) {
            rxo->last_frag_index = frag_index;

            if (do_gro(rxo, rxcp, err))
                be_rx_compl_process_gro(adapter, rxo, rxcp);
            else
                be_rx_compl_process(adapter, rxo, rxcp);
        }

        be_rx_compl_reset(rxcp);
    }

    /* Refill the queue */
    if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
        be_post_rx_frags(rxo);

    /* All consumed */
    if (work_done < budget) {
        napi_complete(napi);
        be_cq_notify(adapter, rx_cq->id, true, work_done);
    } else {
        /* More to be consumed; continue with interrupts disabled */
        be_cq_notify(adapter, rx_cq->id, false, work_done);
    }
    return work_done;
}
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
    struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
    struct be_adapter *adapter =
        container_of(tx_eq, struct be_adapter, tx_eq);
    struct be_queue_info *txq = &adapter->tx_obj.q;
    struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
    struct be_eth_tx_compl *txcp;
    int tx_compl = 0, mcc_compl, status = 0;
    u16 end_idx;

    while ((txcp = be_tx_compl_get(tx_cq))) {
        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                wrb_index, txcp);
        be_tx_compl_process(adapter, end_idx);
        tx_compl++;
    }

    mcc_compl = be_process_mcc(adapter, &status);

    napi_complete(napi);

    if (mcc_compl) {
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
        be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
    }

    if (tx_compl) {
        be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

        /* As Tx wrbs have been freed up, wake up netdev queue if
         * it was stopped due to lack of tx wrbs.
         */
        if (netif_queue_stopped(adapter->netdev) &&
            atomic_read(&txq->used) < txq->len / 2) {
            netif_wake_queue(adapter->netdev);
        }

        tx_stats(adapter)->be_tx_events++;
        tx_stats(adapter)->be_tx_compl += tx_compl;
    }

    return 1;
}
void be_detect_dump_ue(struct be_adapter *adapter)
{
    u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
    u32 i;

    pci_read_config_dword(adapter->pdev,
                PCICFG_UE_STATUS_LOW, &ue_status_lo);
    pci_read_config_dword(adapter->pdev,
                PCICFG_UE_STATUS_HIGH, &ue_status_hi);
    pci_read_config_dword(adapter->pdev,
                PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
    pci_read_config_dword(adapter->pdev,
                PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

    ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
    ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

    if (ue_status_lo || ue_status_hi) {
        adapter->ue_detected = true;
        dev_err(&adapter->pdev->dev, "UE Detected!!\n");
    }

    if (ue_status_lo) {
        for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
            if (ue_status_lo & 1)
                dev_err(&adapter->pdev->dev,
                "UE: %s bit set\n", ue_status_low_desc[i]);
        }
    }
    if (ue_status_hi) {
        for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
            if (ue_status_hi & 1)
                dev_err(&adapter->pdev->dev,
                "UE: %s bit set\n", ue_status_hi_desc[i]);
        }
    }
}
static void be_worker(struct work_struct *work)
{
    struct be_adapter *adapter =
        container_of(work, struct be_adapter, work.work);
    struct be_rx_obj *rxo;
    int i;

    /* when interrupts are not yet enabled, just reap any pending
     * mcc completions */
    if (!netif_running(adapter->netdev)) {
        int mcc_compl, status = 0;

        mcc_compl = be_process_mcc(adapter, &status);

        if (mcc_compl) {
            struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
            be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
        }
        goto reschedule;
    }

    if (!adapter->stats_ioctl_sent)
        be_cmd_get_stats(adapter, &adapter->stats_cmd);

    be_tx_rate_update(adapter);

    for_all_rx_queues(adapter, rxo, i) {
        be_rx_rate_update(rxo);
        be_rx_eqd_update(adapter, rxo);

        if (rxo->rx_post_starved) {
            rxo->rx_post_starved = false;
            be_post_rx_frags(rxo);
        }
    }
    if (!adapter->ue_detected && !lancer_chip(adapter))
        be_detect_dump_ue(adapter);

reschedule:
    schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
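/* MSI-X setup: one vector is requested per rx queue plus one that is
 * shared by the tx and MCC event queue. If the full set cannot be granted,
 * the driver retries with the count the kernel reported and shrinks
 * num_rx_qs accordingly.
 */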
static void be_msix_disable(struct be_adapter *adapter)
{
    if (adapter->msix_enabled) {
        pci_disable_msix(adapter->pdev);
        adapter->msix_enabled = false;
    }
}

static int be_num_rxqs_get(struct be_adapter *adapter)
{
    if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
        !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
        return 1 + MAX_RSS_QS; /* one default non-RSS queue */
    } else {
        dev_warn(&adapter->pdev->dev,
            "No support for multiple RX queues\n");
        return 1;
    }
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
    int i, status;

    adapter->num_rx_qs = be_num_rxqs_get(adapter);

    for (i = 0; i < (adapter->num_rx_qs + 1); i++)
        adapter->msix_entries[i].entry = i;

    status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
            adapter->num_rx_qs + 1);
    if (status == 0) {
        goto done;
    } else if (status >= BE_MIN_MSIX_VECTORS) {
        if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
                status) == 0) {
            adapter->num_rx_qs = status - 1;
            dev_warn(&adapter->pdev->dev,
                "Could alloc only %d MSIx vectors. "
                "Using %d RX Qs\n", status, adapter->num_rx_qs);
            goto done;
        }
    }
    return;
done:
    adapter->msix_enabled = true;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
    be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
    if (be_physfn(adapter) && num_vfs) {
        int status;

        status = pci_enable_sriov(adapter->pdev, num_vfs);
        adapter->sriov_enabled = status ? false : true;
    }
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
    if (adapter->sriov_enabled) {
        pci_disable_sriov(adapter->pdev);
        adapter->sriov_enabled = false;
    }
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
            struct be_eq_obj *eq_obj)
{
    return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
        struct be_eq_obj *eq_obj,
        void *handler, char *desc, void *context)
{
    struct net_device *netdev = adapter->netdev;
    int vec;

    sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
    vec = be_msix_vec_get(adapter, eq_obj);
    return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
            void *context)
{
    int vec = be_msix_vec_get(adapter, eq_obj);
    free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
    struct be_rx_obj *rxo;
    int status, i;
    char qname[10];

    status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
                adapter);
    if (status)
        goto err;

    for_all_rx_queues(adapter, rxo, i) {
        sprintf(qname, "rxq%d", i);
        status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
                qname, rxo);
        if (status)
            goto err_msix;
    }

    return 0;

err_msix:
    be_free_irq(adapter, &adapter->tx_eq, adapter);

    for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
        be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
    dev_warn(&adapter->pdev->dev,
        "MSIX Request IRQ failed - err %d\n", status);
    pci_disable_msix(adapter->pdev);
    adapter->msix_enabled = false;
    return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    int status;

    if (adapter->msix_enabled) {
        status = be_msix_register(adapter);
        if (status == 0)
            goto done;
        /* INTx is not supported for VF */
        if (!be_physfn(adapter))
            return status;
    }

    /* INTx */
    netdev->irq = adapter->pdev->irq;
    status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
            adapter);
    if (status) {
        dev_err(&adapter->pdev->dev,
            "INTx request IRQ failed - err %d\n", status);
        return status;
    }
done:
    adapter->isr_registered = true;
    return status;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct be_rx_obj *rxo;
    int i;

    if (!adapter->isr_registered)
        return;

    /* INTx */
    if (!adapter->msix_enabled) {
        free_irq(netdev->irq, adapter);
        goto done;
    }

    /* MSIx */
    be_free_irq(adapter, &adapter->tx_eq, adapter);

    for_all_rx_queues(adapter, rxo, i)
        be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
    adapter->isr_registered = false;
}
static int be_close(struct net_device *netdev)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    struct be_rx_obj *rxo;
    struct be_eq_obj *tx_eq = &adapter->tx_eq;
    int vec, i;

    be_async_mcc_disable(adapter);

    netif_stop_queue(netdev);
    netif_carrier_off(netdev);
    adapter->link_up = false;

    if (!lancer_chip(adapter))
        be_intr_set(adapter, false);

    if (adapter->msix_enabled) {
        vec = be_msix_vec_get(adapter, tx_eq);
        synchronize_irq(vec);

        for_all_rx_queues(adapter, rxo, i) {
            vec = be_msix_vec_get(adapter, &rxo->rx_eq);
            synchronize_irq(vec);
        }
    } else {
        synchronize_irq(netdev->irq);
    }
    be_irq_unregister(adapter);

    for_all_rx_queues(adapter, rxo, i)
        napi_disable(&rxo->rx_eq.napi);

    napi_disable(&tx_eq->napi);

    /* Wait for all pending tx completions to arrive so that
     * all tx skbs are freed.
     */
    be_tx_compl_clean(adapter);

    return 0;
}
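/* be_open(): rx queues are replenished and NAPI is enabled before IRQs
 * are registered; event queues are created in the unarmed state, so they
 * are armed here via be_eq_notify() once interrupt delivery is in place.
 */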
static int be_open(struct net_device *netdev)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    struct be_eq_obj *tx_eq = &adapter->tx_eq;
    struct be_rx_obj *rxo;
    bool link_up;
    int status, i;
    u8 mac_speed;
    u16 link_speed;

    for_all_rx_queues(adapter, rxo, i) {
        be_post_rx_frags(rxo);
        napi_enable(&rxo->rx_eq.napi);
    }
    napi_enable(&tx_eq->napi);

    be_irq_register(adapter);

    if (!lancer_chip(adapter))
        be_intr_set(adapter, true);

    /* The evt queues are created in unarmed state; arm them */
    for_all_rx_queues(adapter, rxo, i) {
        be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
        be_cq_notify(adapter, rxo->cq.id, true, 0);
    }
    be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

    /* Now that interrupts are on we can process async mcc */
    be_async_mcc_enable(adapter);

    status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
            &link_speed);
    if (status)
        goto err;
    be_link_status_update(adapter, link_up);

    if (be_physfn(adapter)) {
        status = be_vid_config(adapter, false, 0);
        if (status)
            goto err;

        status = be_cmd_set_flow_control(adapter,
                adapter->tx_fc, adapter->rx_fc);
        if (status)
            goto err;
    }

    return 0;
err:
    be_close(adapter->netdev);
    return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
    struct be_dma_mem cmd;
    int status = 0;
    u8 mac[ETH_ALEN];

    memset(mac, 0, ETH_ALEN);

    cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
    cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
    if (cmd.va == NULL)
        return -1;
    memset(cmd.va, 0, cmd.size);

    if (enable) {
        status = pci_write_config_dword(adapter->pdev,
            PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
        if (status) {
            dev_err(&adapter->pdev->dev,
                "Could not enable Wake-on-lan\n");
            pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
                    cmd.dma);
            return status;
        }
        status = be_cmd_enable_magic_wol(adapter,
                adapter->netdev->dev_addr, &cmd);
        pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
        pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
    } else {
        status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
        pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
        pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
    }

    pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
    return status;
}
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
    u32 vf = 0;
    int status = 0;
    u8 mac[ETH_ALEN];

    be_vf_eth_addr_generate(adapter, mac);

    for (vf = 0; vf < num_vfs; vf++) {
        status = be_cmd_pmac_add(adapter, mac,
                    adapter->vf_cfg[vf].vf_if_handle,
                    &adapter->vf_cfg[vf].vf_pmac_id);
        if (status)
            dev_err(&adapter->pdev->dev,
                "Mac address add failed for VF %d\n", vf);
        else
            memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        mac[5] += 1;
    }
    return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
    u32 vf;

    for (vf = 0; vf < num_vfs; vf++) {
        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
            be_cmd_pmac_del(adapter,
                    adapter->vf_cfg[vf].vf_if_handle,
                    adapter->vf_cfg[vf].vf_pmac_id);
    }
}
static int be_setup(struct be_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    u32 cap_flags, en_flags, vf = 0;
    int status;
    u8 mac[ETH_ALEN];

    cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

    if (be_physfn(adapter)) {
        cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
                BE_IF_FLAGS_PROMISCUOUS |
                BE_IF_FLAGS_PASS_L3L4_ERRORS;
        en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

        if (be_multi_rxq(adapter)) {
            cap_flags |= BE_IF_FLAGS_RSS;
            en_flags |= BE_IF_FLAGS_RSS;
        }
    }

    status = be_cmd_if_create(adapter, cap_flags, en_flags,
            netdev->dev_addr, false/* pmac_invalid */,
            &adapter->if_handle, &adapter->pmac_id, 0);
    if (status != 0)
        goto do_none;

    if (be_physfn(adapter)) {
        while (vf < num_vfs) {
            cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
                    | BE_IF_FLAGS_BROADCAST;
            status = be_cmd_if_create(adapter, cap_flags, en_flags,
                    mac, true,
                    &adapter->vf_cfg[vf].vf_if_handle,
                    NULL, vf+1);
            if (status) {
                dev_err(&adapter->pdev->dev,
                "Interface Create failed for VF %d\n", vf);
                goto if_destroy;
            }
            adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
            vf++;
        }
    } else if (!be_physfn(adapter)) {
        status = be_cmd_mac_addr_query(adapter, mac,
            MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
        if (!status) {
            memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
            memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
        }
    }

    status = be_tx_queues_create(adapter);
    if (status != 0)
        goto if_destroy;

    status = be_rx_queues_create(adapter);
    if (status != 0)
        goto tx_qs_destroy;

    status = be_mcc_queues_create(adapter);
    if (status != 0)
        goto rx_qs_destroy;

    if (be_physfn(adapter)) {
        status = be_vf_eth_addr_config(adapter);
        if (status)
            goto mcc_q_destroy;
    }

    adapter->link_speed = -1;

    return 0;

mcc_q_destroy:
    if (be_physfn(adapter))
        be_vf_eth_addr_rem(adapter);
    be_mcc_queues_destroy(adapter);
rx_qs_destroy:
    be_rx_queues_destroy(adapter);
tx_qs_destroy:
    be_tx_queues_destroy(adapter);
if_destroy:
    for (vf = 0; vf < num_vfs; vf++)
        if (adapter->vf_cfg[vf].vf_if_handle)
            be_cmd_if_destroy(adapter,
                    adapter->vf_cfg[vf].vf_if_handle);
    be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
    return status;
}

static int be_clear(struct be_adapter *adapter)
{
    if (be_physfn(adapter))
        be_vf_eth_addr_rem(adapter);

    be_mcc_queues_destroy(adapter);
    be_rx_queues_destroy(adapter);
    be_tx_queues_destroy(adapter);

    be_cmd_if_destroy(adapter, adapter->if_handle);

    /* tell fw we're done with firing cmds */
    be_cmd_fw_clean(adapter);
    return 0;
}
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
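/*
 * The last four bytes of the redboot image inside the UFI file hold its
 * CRC (that is where crc_offset points); when it matches the CRC read
 * back from flash, the image is already current and reflashing is
 * skipped.
 */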
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size,
			filehdr_size)))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
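/*
 * Flashing proceeds only when the UFI generation (derived from the
 * header build string by get_ufigen_type()) matches the adapter
 * generation; a gen2 image is never written to a gen3 card or vice
 * versa.
 */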
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_stop_queue(netdev);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
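/*
 * BAR usage as reconstructed above (the register numbers are assumed
 * from the generation checks): gen2 maps pcicfg from BAR1 and doorbells
 * from BAR4; gen3 PFs map pcicfg from BAR0 and doorbells from BAR4;
 * gen3 VFs take doorbells from BAR0 and reach their pcicfg space at
 * SRIOV_VF_PCICFG_OFFSET within that mapping.
 */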
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
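/*
 * The mailbox is kept 16-byte aligned: 16 extra bytes are allocated and
 * both the virtual and DMA addresses are rounded up with PTR_ALIGN, so
 * adapter->mbox_mem is an aligned window inside mbox_mem_alloced.
 */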
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
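/*
 * For OC_DEVICE_ID3 the generation cannot be inferred from the PCI
 * device id alone, so SLI_INTF is read to validate the interface type
 * and extract the SLI family before the device is treated as gen3.
 */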
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
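/*
 * The error labels above release resources in exact reverse order of
 * the acquisition steps in be_probe(), so a failure at any stage frees
 * only what was already set up.
 */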
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
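/*
 * EEH recovery sequence: error_detected() quiesces the device and asks
 * for a reset, slot_reset() re-enables the function and re-runs POST,
 * and resume() re-initializes firmware state via be_setup() and
 * reopens the interface.
 */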
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);