2 * Copyright (C) 2005 - 2011 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
22 #include <asm/div64.h>
24 MODULE_VERSION(DRV_VER
);
25 MODULE_DEVICE_TABLE(pci
, be_dev_ids
);
26 MODULE_DESCRIPTION(DRV_DESC
" " DRV_VER
);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
30 static unsigned int num_vfs
;
31 module_param(num_vfs
, uint
, S_IRUGO
);
32 MODULE_PARM_DESC(num_vfs
, "Number of PCI VFs to initialize");
34 static ushort rx_frag_size
= 2048;
35 module_param(rx_frag_size
, ushort
, S_IRUGO
);
36 MODULE_PARM_DESC(rx_frag_size
, "Size of a fragment that holds rcvd data.");
38 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids
) = {
39 { PCI_DEVICE(BE_VENDOR_ID
, BE_DEVICE_ID1
) },
40 { PCI_DEVICE(BE_VENDOR_ID
, BE_DEVICE_ID2
) },
41 { PCI_DEVICE(BE_VENDOR_ID
, OC_DEVICE_ID1
) },
42 { PCI_DEVICE(BE_VENDOR_ID
, OC_DEVICE_ID2
) },
43 { PCI_DEVICE(EMULEX_VENDOR_ID
, OC_DEVICE_ID3
)},
44 { PCI_DEVICE(EMULEX_VENDOR_ID
, OC_DEVICE_ID4
)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID
, OC_DEVICE_ID5
)},
48 MODULE_DEVICE_TABLE(pci
, be_dev_ids
);
49 /* UE Status Low CSR */
50 static const char * const ue_status_low_desc
[] = {
84 /* UE Status High CSR */
85 static const char * const ue_status_hi_desc
[] = {
120 /* Is BE in a multi-channel mode */
121 static inline bool be_is_mc(struct be_adapter
*adapter
) {
122 return (adapter
->function_mode
& FLEX10_MODE
||
123 adapter
->function_mode
& VNIC_MODE
||
124 adapter
->function_mode
& UMC_ENABLED
);
127 static void be_queue_free(struct be_adapter
*adapter
, struct be_queue_info
*q
)
129 struct be_dma_mem
*mem
= &q
->dma_mem
;
131 dma_free_coherent(&adapter
->pdev
->dev
, mem
->size
, mem
->va
,
137 static int be_queue_alloc(struct be_adapter
*adapter
, struct be_queue_info
*q
,
138 u16 len
, u16 entry_size
)
140 struct be_dma_mem
*mem
= &q
->dma_mem
;
142 memset(q
, 0, sizeof(*q
));
144 q
->entry_size
= entry_size
;
145 mem
->size
= len
* entry_size
;
146 mem
->va
= dma_alloc_coherent(&adapter
->pdev
->dev
, mem
->size
, &mem
->dma
,
150 memset(mem
->va
, 0, mem
->size
);
154 static void be_intr_set(struct be_adapter
*adapter
, bool enable
)
158 if (adapter
->eeh_err
)
161 pci_read_config_dword(adapter
->pdev
, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET
,
163 enabled
= reg
& MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
165 if (!enabled
&& enable
)
166 reg
|= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
167 else if (enabled
&& !enable
)
168 reg
&= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
172 pci_write_config_dword(adapter
->pdev
,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET
, reg
);
176 static void be_rxq_notify(struct be_adapter
*adapter
, u16 qid
, u16 posted
)
179 val
|= qid
& DB_RQ_RING_ID_MASK
;
180 val
|= posted
<< DB_RQ_NUM_POSTED_SHIFT
;
183 iowrite32(val
, adapter
->db
+ DB_RQ_OFFSET
);
186 static void be_txq_notify(struct be_adapter
*adapter
, u16 qid
, u16 posted
)
189 val
|= qid
& DB_TXULP_RING_ID_MASK
;
190 val
|= (posted
& DB_TXULP_NUM_POSTED_MASK
) << DB_TXULP_NUM_POSTED_SHIFT
;
193 iowrite32(val
, adapter
->db
+ DB_TXULP1_OFFSET
);
196 static void be_eq_notify(struct be_adapter
*adapter
, u16 qid
,
197 bool arm
, bool clear_int
, u16 num_popped
)
200 val
|= qid
& DB_EQ_RING_ID_MASK
;
201 val
|= ((qid
& DB_EQ_RING_ID_EXT_MASK
) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT
);
204 if (adapter
->eeh_err
)
208 val
|= 1 << DB_EQ_REARM_SHIFT
;
210 val
|= 1 << DB_EQ_CLR_SHIFT
;
211 val
|= 1 << DB_EQ_EVNT_SHIFT
;
212 val
|= num_popped
<< DB_EQ_NUM_POPPED_SHIFT
;
213 iowrite32(val
, adapter
->db
+ DB_EQ_OFFSET
);
216 void be_cq_notify(struct be_adapter
*adapter
, u16 qid
, bool arm
, u16 num_popped
)
219 val
|= qid
& DB_CQ_RING_ID_MASK
;
220 val
|= ((qid
& DB_CQ_RING_ID_EXT_MASK
) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT
);
223 if (adapter
->eeh_err
)
227 val
|= 1 << DB_CQ_REARM_SHIFT
;
228 val
|= num_popped
<< DB_CQ_NUM_POPPED_SHIFT
;
229 iowrite32(val
, adapter
->db
+ DB_CQ_OFFSET
);
232 static int be_mac_addr_set(struct net_device
*netdev
, void *p
)
234 struct be_adapter
*adapter
= netdev_priv(netdev
);
235 struct sockaddr
*addr
= p
;
237 u8 current_mac
[ETH_ALEN
];
238 u32 pmac_id
= adapter
->pmac_id
[0];
240 if (!is_valid_ether_addr(addr
->sa_data
))
241 return -EADDRNOTAVAIL
;
243 status
= be_cmd_mac_addr_query(adapter
, current_mac
,
244 MAC_ADDRESS_TYPE_NETWORK
, false,
245 adapter
->if_handle
, 0);
249 if (memcmp(addr
->sa_data
, current_mac
, ETH_ALEN
)) {
250 status
= be_cmd_pmac_add(adapter
, (u8
*)addr
->sa_data
,
251 adapter
->if_handle
, &adapter
->pmac_id
[0], 0);
255 be_cmd_pmac_del(adapter
, adapter
->if_handle
, pmac_id
, 0);
257 memcpy(netdev
->dev_addr
, addr
->sa_data
, netdev
->addr_len
);
260 dev_err(&adapter
->pdev
->dev
, "MAC %pM set Failed\n", addr
->sa_data
);
264 static void populate_be2_stats(struct be_adapter
*adapter
)
266 struct be_hw_stats_v0
*hw_stats
= hw_stats_from_cmd(adapter
);
267 struct be_pmem_stats
*pmem_sts
= &hw_stats
->pmem
;
268 struct be_rxf_stats_v0
*rxf_stats
= &hw_stats
->rxf
;
269 struct be_port_rxf_stats_v0
*port_stats
=
270 &rxf_stats
->port
[adapter
->port_num
];
271 struct be_drv_stats
*drvs
= &adapter
->drv_stats
;
273 be_dws_le_to_cpu(hw_stats
, sizeof(*hw_stats
));
274 drvs
->rx_pause_frames
= port_stats
->rx_pause_frames
;
275 drvs
->rx_crc_errors
= port_stats
->rx_crc_errors
;
276 drvs
->rx_control_frames
= port_stats
->rx_control_frames
;
277 drvs
->rx_in_range_errors
= port_stats
->rx_in_range_errors
;
278 drvs
->rx_frame_too_long
= port_stats
->rx_frame_too_long
;
279 drvs
->rx_dropped_runt
= port_stats
->rx_dropped_runt
;
280 drvs
->rx_ip_checksum_errs
= port_stats
->rx_ip_checksum_errs
;
281 drvs
->rx_tcp_checksum_errs
= port_stats
->rx_tcp_checksum_errs
;
282 drvs
->rx_udp_checksum_errs
= port_stats
->rx_udp_checksum_errs
;
283 drvs
->rxpp_fifo_overflow_drop
= port_stats
->rx_fifo_overflow
;
284 drvs
->rx_dropped_tcp_length
= port_stats
->rx_dropped_tcp_length
;
285 drvs
->rx_dropped_too_small
= port_stats
->rx_dropped_too_small
;
286 drvs
->rx_dropped_too_short
= port_stats
->rx_dropped_too_short
;
287 drvs
->rx_out_range_errors
= port_stats
->rx_out_range_errors
;
288 drvs
->rx_input_fifo_overflow_drop
= port_stats
->rx_input_fifo_overflow
;
289 drvs
->rx_dropped_header_too_small
=
290 port_stats
->rx_dropped_header_too_small
;
291 drvs
->rx_address_mismatch_drops
=
292 port_stats
->rx_address_mismatch_drops
+
293 port_stats
->rx_vlan_mismatch_drops
;
294 drvs
->rx_alignment_symbol_errors
=
295 port_stats
->rx_alignment_symbol_errors
;
297 drvs
->tx_pauseframes
= port_stats
->tx_pauseframes
;
298 drvs
->tx_controlframes
= port_stats
->tx_controlframes
;
300 if (adapter
->port_num
)
301 drvs
->jabber_events
= rxf_stats
->port1_jabber_events
;
303 drvs
->jabber_events
= rxf_stats
->port0_jabber_events
;
304 drvs
->rx_drops_no_pbuf
= rxf_stats
->rx_drops_no_pbuf
;
305 drvs
->rx_drops_no_erx_descr
= rxf_stats
->rx_drops_no_erx_descr
;
306 drvs
->forwarded_packets
= rxf_stats
->forwarded_packets
;
307 drvs
->rx_drops_mtu
= rxf_stats
->rx_drops_mtu
;
308 drvs
->rx_drops_no_tpre_descr
= rxf_stats
->rx_drops_no_tpre_descr
;
309 drvs
->rx_drops_too_many_frags
= rxf_stats
->rx_drops_too_many_frags
;
310 adapter
->drv_stats
.eth_red_drops
= pmem_sts
->eth_red_drops
;
313 static void populate_be3_stats(struct be_adapter
*adapter
)
315 struct be_hw_stats_v1
*hw_stats
= hw_stats_from_cmd(adapter
);
316 struct be_pmem_stats
*pmem_sts
= &hw_stats
->pmem
;
317 struct be_rxf_stats_v1
*rxf_stats
= &hw_stats
->rxf
;
318 struct be_port_rxf_stats_v1
*port_stats
=
319 &rxf_stats
->port
[adapter
->port_num
];
320 struct be_drv_stats
*drvs
= &adapter
->drv_stats
;
322 be_dws_le_to_cpu(hw_stats
, sizeof(*hw_stats
));
323 drvs
->pmem_fifo_overflow_drop
= port_stats
->pmem_fifo_overflow_drop
;
324 drvs
->rx_priority_pause_frames
= port_stats
->rx_priority_pause_frames
;
325 drvs
->rx_pause_frames
= port_stats
->rx_pause_frames
;
326 drvs
->rx_crc_errors
= port_stats
->rx_crc_errors
;
327 drvs
->rx_control_frames
= port_stats
->rx_control_frames
;
328 drvs
->rx_in_range_errors
= port_stats
->rx_in_range_errors
;
329 drvs
->rx_frame_too_long
= port_stats
->rx_frame_too_long
;
330 drvs
->rx_dropped_runt
= port_stats
->rx_dropped_runt
;
331 drvs
->rx_ip_checksum_errs
= port_stats
->rx_ip_checksum_errs
;
332 drvs
->rx_tcp_checksum_errs
= port_stats
->rx_tcp_checksum_errs
;
333 drvs
->rx_udp_checksum_errs
= port_stats
->rx_udp_checksum_errs
;
334 drvs
->rx_dropped_tcp_length
= port_stats
->rx_dropped_tcp_length
;
335 drvs
->rx_dropped_too_small
= port_stats
->rx_dropped_too_small
;
336 drvs
->rx_dropped_too_short
= port_stats
->rx_dropped_too_short
;
337 drvs
->rx_out_range_errors
= port_stats
->rx_out_range_errors
;
338 drvs
->rx_dropped_header_too_small
=
339 port_stats
->rx_dropped_header_too_small
;
340 drvs
->rx_input_fifo_overflow_drop
=
341 port_stats
->rx_input_fifo_overflow_drop
;
342 drvs
->rx_address_mismatch_drops
= port_stats
->rx_address_mismatch_drops
;
343 drvs
->rx_alignment_symbol_errors
=
344 port_stats
->rx_alignment_symbol_errors
;
345 drvs
->rxpp_fifo_overflow_drop
= port_stats
->rxpp_fifo_overflow_drop
;
346 drvs
->tx_pauseframes
= port_stats
->tx_pauseframes
;
347 drvs
->tx_controlframes
= port_stats
->tx_controlframes
;
348 drvs
->jabber_events
= port_stats
->jabber_events
;
349 drvs
->rx_drops_no_pbuf
= rxf_stats
->rx_drops_no_pbuf
;
350 drvs
->rx_drops_no_erx_descr
= rxf_stats
->rx_drops_no_erx_descr
;
351 drvs
->forwarded_packets
= rxf_stats
->forwarded_packets
;
352 drvs
->rx_drops_mtu
= rxf_stats
->rx_drops_mtu
;
353 drvs
->rx_drops_no_tpre_descr
= rxf_stats
->rx_drops_no_tpre_descr
;
354 drvs
->rx_drops_too_many_frags
= rxf_stats
->rx_drops_too_many_frags
;
355 adapter
->drv_stats
.eth_red_drops
= pmem_sts
->eth_red_drops
;
358 static void populate_lancer_stats(struct be_adapter
*adapter
)
361 struct be_drv_stats
*drvs
= &adapter
->drv_stats
;
362 struct lancer_pport_stats
*pport_stats
=
363 pport_stats_from_cmd(adapter
);
365 be_dws_le_to_cpu(pport_stats
, sizeof(*pport_stats
));
366 drvs
->rx_pause_frames
= pport_stats
->rx_pause_frames_lo
;
367 drvs
->rx_crc_errors
= pport_stats
->rx_crc_errors_lo
;
368 drvs
->rx_control_frames
= pport_stats
->rx_control_frames_lo
;
369 drvs
->rx_in_range_errors
= pport_stats
->rx_in_range_errors
;
370 drvs
->rx_frame_too_long
= pport_stats
->rx_frames_too_long_lo
;
371 drvs
->rx_dropped_runt
= pport_stats
->rx_dropped_runt
;
372 drvs
->rx_ip_checksum_errs
= pport_stats
->rx_ip_checksum_errors
;
373 drvs
->rx_tcp_checksum_errs
= pport_stats
->rx_tcp_checksum_errors
;
374 drvs
->rx_udp_checksum_errs
= pport_stats
->rx_udp_checksum_errors
;
375 drvs
->rx_dropped_tcp_length
=
376 pport_stats
->rx_dropped_invalid_tcp_length
;
377 drvs
->rx_dropped_too_small
= pport_stats
->rx_dropped_too_small
;
378 drvs
->rx_dropped_too_short
= pport_stats
->rx_dropped_too_short
;
379 drvs
->rx_out_range_errors
= pport_stats
->rx_out_of_range_errors
;
380 drvs
->rx_dropped_header_too_small
=
381 pport_stats
->rx_dropped_header_too_small
;
382 drvs
->rx_input_fifo_overflow_drop
= pport_stats
->rx_fifo_overflow
;
383 drvs
->rx_address_mismatch_drops
=
384 pport_stats
->rx_address_mismatch_drops
+
385 pport_stats
->rx_vlan_mismatch_drops
;
386 drvs
->rx_alignment_symbol_errors
= pport_stats
->rx_symbol_errors_lo
;
387 drvs
->rxpp_fifo_overflow_drop
= pport_stats
->rx_fifo_overflow
;
388 drvs
->tx_pauseframes
= pport_stats
->tx_pause_frames_lo
;
389 drvs
->tx_controlframes
= pport_stats
->tx_control_frames_lo
;
390 drvs
->jabber_events
= pport_stats
->rx_jabbers
;
391 drvs
->forwarded_packets
= pport_stats
->num_forwards_lo
;
392 drvs
->rx_drops_mtu
= pport_stats
->rx_drops_mtu_lo
;
393 drvs
->rx_drops_too_many_frags
=
394 pport_stats
->rx_drops_too_many_frags_lo
;
397 static void accumulate_16bit_val(u32
*acc
, u16 val
)
399 #define lo(x) (x & 0xFFFF)
400 #define hi(x) (x & 0xFFFF0000)
401 bool wrapped
= val
< lo(*acc
);
402 u32 newacc
= hi(*acc
) + val
;
406 ACCESS_ONCE(*acc
) = newacc
;
409 void be_parse_stats(struct be_adapter
*adapter
)
411 struct be_erx_stats_v1
*erx
= be_erx_stats_from_cmd(adapter
);
412 struct be_rx_obj
*rxo
;
415 if (adapter
->generation
== BE_GEN3
) {
416 if (lancer_chip(adapter
))
417 populate_lancer_stats(adapter
);
419 populate_be3_stats(adapter
);
421 populate_be2_stats(adapter
);
424 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
425 for_all_rx_queues(adapter
, rxo
, i
) {
426 /* below erx HW counter can actually wrap around after
427 * 65535. Driver accumulates a 32-bit value
429 accumulate_16bit_val(&rx_stats(rxo
)->rx_drops_no_frags
,
430 (u16
)erx
->rx_drops_no_fragments
[rxo
->q
.id
]);
434 static struct rtnl_link_stats64
*be_get_stats64(struct net_device
*netdev
,
435 struct rtnl_link_stats64
*stats
)
437 struct be_adapter
*adapter
= netdev_priv(netdev
);
438 struct be_drv_stats
*drvs
= &adapter
->drv_stats
;
439 struct be_rx_obj
*rxo
;
440 struct be_tx_obj
*txo
;
445 for_all_rx_queues(adapter
, rxo
, i
) {
446 const struct be_rx_stats
*rx_stats
= rx_stats(rxo
);
448 start
= u64_stats_fetch_begin_bh(&rx_stats
->sync
);
449 pkts
= rx_stats(rxo
)->rx_pkts
;
450 bytes
= rx_stats(rxo
)->rx_bytes
;
451 } while (u64_stats_fetch_retry_bh(&rx_stats
->sync
, start
));
452 stats
->rx_packets
+= pkts
;
453 stats
->rx_bytes
+= bytes
;
454 stats
->multicast
+= rx_stats(rxo
)->rx_mcast_pkts
;
455 stats
->rx_dropped
+= rx_stats(rxo
)->rx_drops_no_skbs
+
456 rx_stats(rxo
)->rx_drops_no_frags
;
459 for_all_tx_queues(adapter
, txo
, i
) {
460 const struct be_tx_stats
*tx_stats
= tx_stats(txo
);
462 start
= u64_stats_fetch_begin_bh(&tx_stats
->sync
);
463 pkts
= tx_stats(txo
)->tx_pkts
;
464 bytes
= tx_stats(txo
)->tx_bytes
;
465 } while (u64_stats_fetch_retry_bh(&tx_stats
->sync
, start
));
466 stats
->tx_packets
+= pkts
;
467 stats
->tx_bytes
+= bytes
;
470 /* bad pkts received */
471 stats
->rx_errors
= drvs
->rx_crc_errors
+
472 drvs
->rx_alignment_symbol_errors
+
473 drvs
->rx_in_range_errors
+
474 drvs
->rx_out_range_errors
+
475 drvs
->rx_frame_too_long
+
476 drvs
->rx_dropped_too_small
+
477 drvs
->rx_dropped_too_short
+
478 drvs
->rx_dropped_header_too_small
+
479 drvs
->rx_dropped_tcp_length
+
480 drvs
->rx_dropped_runt
;
482 /* detailed rx errors */
483 stats
->rx_length_errors
= drvs
->rx_in_range_errors
+
484 drvs
->rx_out_range_errors
+
485 drvs
->rx_frame_too_long
;
487 stats
->rx_crc_errors
= drvs
->rx_crc_errors
;
489 /* frame alignment errors */
490 stats
->rx_frame_errors
= drvs
->rx_alignment_symbol_errors
;
492 /* receiver fifo overrun */
493 /* drops_no_pbuf is no per i/f, it's per BE card */
494 stats
->rx_fifo_errors
= drvs
->rxpp_fifo_overflow_drop
+
495 drvs
->rx_input_fifo_overflow_drop
+
496 drvs
->rx_drops_no_pbuf
;
500 void be_link_status_update(struct be_adapter
*adapter
, u8 link_status
)
502 struct net_device
*netdev
= adapter
->netdev
;
504 if (!(adapter
->flags
& BE_FLAGS_LINK_STATUS_INIT
)) {
505 netif_carrier_off(netdev
);
506 adapter
->flags
|= BE_FLAGS_LINK_STATUS_INIT
;
509 if ((link_status
& LINK_STATUS_MASK
) == LINK_UP
)
510 netif_carrier_on(netdev
);
512 netif_carrier_off(netdev
);
515 static void be_tx_stats_update(struct be_tx_obj
*txo
,
516 u32 wrb_cnt
, u32 copied
, u32 gso_segs
, bool stopped
)
518 struct be_tx_stats
*stats
= tx_stats(txo
);
520 u64_stats_update_begin(&stats
->sync
);
522 stats
->tx_wrbs
+= wrb_cnt
;
523 stats
->tx_bytes
+= copied
;
524 stats
->tx_pkts
+= (gso_segs
? gso_segs
: 1);
527 u64_stats_update_end(&stats
->sync
);
530 /* Determine number of WRB entries needed to xmit data in an skb */
531 static u32
wrb_cnt_for_skb(struct be_adapter
*adapter
, struct sk_buff
*skb
,
534 int cnt
= (skb
->len
> skb
->data_len
);
536 cnt
+= skb_shinfo(skb
)->nr_frags
;
538 /* to account for hdr wrb */
540 if (lancer_chip(adapter
) || !(cnt
& 1)) {
543 /* add a dummy to make it an even num */
547 BUG_ON(cnt
> BE_MAX_TX_FRAG_COUNT
);
551 static inline void wrb_fill(struct be_eth_wrb
*wrb
, u64 addr
, int len
)
553 wrb
->frag_pa_hi
= upper_32_bits(addr
);
554 wrb
->frag_pa_lo
= addr
& 0xFFFFFFFF;
555 wrb
->frag_len
= len
& ETH_WRB_FRAG_LEN_MASK
;
558 static inline u16
be_get_tx_vlan_tag(struct be_adapter
*adapter
,
564 vlan_tag
= vlan_tx_tag_get(skb
);
565 vlan_prio
= (vlan_tag
& VLAN_PRIO_MASK
) >> VLAN_PRIO_SHIFT
;
566 /* If vlan priority provided by OS is NOT in available bmap */
567 if (!(adapter
->vlan_prio_bmap
& (1 << vlan_prio
)))
568 vlan_tag
= (vlan_tag
& ~VLAN_PRIO_MASK
) |
569 adapter
->recommended_prio
;
574 static int be_vlan_tag_chk(struct be_adapter
*adapter
, struct sk_buff
*skb
)
576 return vlan_tx_tag_present(skb
) || adapter
->pvid
;
579 static void wrb_fill_hdr(struct be_adapter
*adapter
, struct be_eth_hdr_wrb
*hdr
,
580 struct sk_buff
*skb
, u32 wrb_cnt
, u32 len
)
584 memset(hdr
, 0, sizeof(*hdr
));
586 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, crc
, hdr
, 1);
588 if (skb_is_gso(skb
)) {
589 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, lso
, hdr
, 1);
590 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, lso_mss
,
591 hdr
, skb_shinfo(skb
)->gso_size
);
592 if (skb_is_gso_v6(skb
) && !lancer_chip(adapter
))
593 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, lso6
, hdr
, 1);
594 if (lancer_chip(adapter
) && adapter
->sli_family
==
595 LANCER_A0_SLI_FAMILY
) {
596 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, ipcs
, hdr
, 1);
598 AMAP_SET_BITS(struct amap_eth_hdr_wrb
,
600 else if (is_udp_pkt(skb
))
601 AMAP_SET_BITS(struct amap_eth_hdr_wrb
,
604 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
606 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, tcpcs
, hdr
, 1);
607 else if (is_udp_pkt(skb
))
608 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, udpcs
, hdr
, 1);
611 if (vlan_tx_tag_present(skb
)) {
612 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, vlan
, hdr
, 1);
613 vlan_tag
= be_get_tx_vlan_tag(adapter
, skb
);
614 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, vlan_tag
, hdr
, vlan_tag
);
617 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, event
, hdr
, 1);
618 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, complete
, hdr
, 1);
619 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, num_wrb
, hdr
, wrb_cnt
);
620 AMAP_SET_BITS(struct amap_eth_hdr_wrb
, len
, hdr
, len
);
623 static void unmap_tx_frag(struct device
*dev
, struct be_eth_wrb
*wrb
,
628 be_dws_le_to_cpu(wrb
, sizeof(*wrb
));
630 dma
= (u64
)wrb
->frag_pa_hi
<< 32 | (u64
)wrb
->frag_pa_lo
;
633 dma_unmap_single(dev
, dma
, wrb
->frag_len
,
636 dma_unmap_page(dev
, dma
, wrb
->frag_len
, DMA_TO_DEVICE
);
640 static int make_tx_wrbs(struct be_adapter
*adapter
, struct be_queue_info
*txq
,
641 struct sk_buff
*skb
, u32 wrb_cnt
, bool dummy_wrb
)
645 struct device
*dev
= &adapter
->pdev
->dev
;
646 struct sk_buff
*first_skb
= skb
;
647 struct be_eth_wrb
*wrb
;
648 struct be_eth_hdr_wrb
*hdr
;
649 bool map_single
= false;
652 hdr
= queue_head_node(txq
);
654 map_head
= txq
->head
;
656 if (skb
->len
> skb
->data_len
) {
657 int len
= skb_headlen(skb
);
658 busaddr
= dma_map_single(dev
, skb
->data
, len
, DMA_TO_DEVICE
);
659 if (dma_mapping_error(dev
, busaddr
))
662 wrb
= queue_head_node(txq
);
663 wrb_fill(wrb
, busaddr
, len
);
664 be_dws_cpu_to_le(wrb
, sizeof(*wrb
));
669 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
670 const struct skb_frag_struct
*frag
=
671 &skb_shinfo(skb
)->frags
[i
];
672 busaddr
= skb_frag_dma_map(dev
, frag
, 0,
673 skb_frag_size(frag
), DMA_TO_DEVICE
);
674 if (dma_mapping_error(dev
, busaddr
))
676 wrb
= queue_head_node(txq
);
677 wrb_fill(wrb
, busaddr
, skb_frag_size(frag
));
678 be_dws_cpu_to_le(wrb
, sizeof(*wrb
));
680 copied
+= skb_frag_size(frag
);
684 wrb
= queue_head_node(txq
);
686 be_dws_cpu_to_le(wrb
, sizeof(*wrb
));
690 wrb_fill_hdr(adapter
, hdr
, first_skb
, wrb_cnt
, copied
);
691 be_dws_cpu_to_le(hdr
, sizeof(*hdr
));
695 txq
->head
= map_head
;
697 wrb
= queue_head_node(txq
);
698 unmap_tx_frag(dev
, wrb
, map_single
);
700 copied
-= wrb
->frag_len
;
706 static struct sk_buff
*be_insert_vlan_in_pkt(struct be_adapter
*adapter
,
711 skb
= skb_share_check(skb
, GFP_ATOMIC
);
715 if (vlan_tx_tag_present(skb
)) {
716 vlan_tag
= be_get_tx_vlan_tag(adapter
, skb
);
717 __vlan_put_tag(skb
, vlan_tag
);
724 static netdev_tx_t
be_xmit(struct sk_buff
*skb
,
725 struct net_device
*netdev
)
727 struct be_adapter
*adapter
= netdev_priv(netdev
);
728 struct be_tx_obj
*txo
= &adapter
->tx_obj
[skb_get_queue_mapping(skb
)];
729 struct be_queue_info
*txq
= &txo
->q
;
730 struct iphdr
*ip
= NULL
;
731 u32 wrb_cnt
= 0, copied
= 0;
732 u32 start
= txq
->head
, eth_hdr_len
;
733 bool dummy_wrb
, stopped
= false;
735 eth_hdr_len
= ntohs(skb
->protocol
) == ETH_P_8021Q
?
736 VLAN_ETH_HLEN
: ETH_HLEN
;
738 /* HW has a bug which considers padding bytes as legal
739 * and modifies the IPv4 hdr's 'tot_len' field
741 if (skb
->len
<= 60 && be_vlan_tag_chk(adapter
, skb
) &&
743 ip
= (struct iphdr
*)ip_hdr(skb
);
744 pskb_trim(skb
, eth_hdr_len
+ ntohs(ip
->tot_len
));
747 /* HW has a bug wherein it will calculate CSUM for VLAN
748 * pkts even though it is disabled.
749 * Manually insert VLAN in pkt.
751 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
&&
752 be_vlan_tag_chk(adapter
, skb
)) {
753 skb
= be_insert_vlan_in_pkt(adapter
, skb
);
758 wrb_cnt
= wrb_cnt_for_skb(adapter
, skb
, &dummy_wrb
);
760 copied
= make_tx_wrbs(adapter
, txq
, skb
, wrb_cnt
, dummy_wrb
);
762 int gso_segs
= skb_shinfo(skb
)->gso_segs
;
764 /* record the sent skb in the sent_skb table */
765 BUG_ON(txo
->sent_skb_list
[start
]);
766 txo
->sent_skb_list
[start
] = skb
;
768 /* Ensure txq has space for the next skb; Else stop the queue
769 * *BEFORE* ringing the tx doorbell, so that we serialze the
770 * tx compls of the current transmit which'll wake up the queue
772 atomic_add(wrb_cnt
, &txq
->used
);
773 if ((BE_MAX_TX_FRAG_COUNT
+ atomic_read(&txq
->used
)) >=
775 netif_stop_subqueue(netdev
, skb_get_queue_mapping(skb
));
779 be_txq_notify(adapter
, txq
->id
, wrb_cnt
);
781 be_tx_stats_update(txo
, wrb_cnt
, copied
, gso_segs
, stopped
);
784 dev_kfree_skb_any(skb
);
790 static int be_change_mtu(struct net_device
*netdev
, int new_mtu
)
792 struct be_adapter
*adapter
= netdev_priv(netdev
);
793 if (new_mtu
< BE_MIN_MTU
||
794 new_mtu
> (BE_MAX_JUMBO_FRAME_SIZE
-
795 (ETH_HLEN
+ ETH_FCS_LEN
))) {
796 dev_info(&adapter
->pdev
->dev
,
797 "MTU must be between %d and %d bytes\n",
799 (BE_MAX_JUMBO_FRAME_SIZE
- (ETH_HLEN
+ ETH_FCS_LEN
)));
802 dev_info(&adapter
->pdev
->dev
, "MTU changed from %d to %d bytes\n",
803 netdev
->mtu
, new_mtu
);
804 netdev
->mtu
= new_mtu
;
809 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
810 * If the user configures more, place BE in vlan promiscuous mode.
812 static int be_vid_config(struct be_adapter
*adapter
, bool vf
, u32 vf_num
)
814 struct be_vf_cfg
*vf_cfg
= &adapter
->vf_cfg
[vf_num
];
815 u16 vtag
[BE_NUM_VLANS_SUPPORTED
];
820 vtag
[0] = cpu_to_le16(vf_cfg
->vlan_tag
);
821 status
= be_cmd_vlan_config(adapter
, vf_cfg
->if_handle
, vtag
,
825 /* No need to further configure vids if in promiscuous mode */
826 if (adapter
->promiscuous
)
829 if (adapter
->vlans_added
<= adapter
->max_vlans
) {
830 /* Construct VLAN Table to give to HW */
831 for (i
= 0; i
< VLAN_N_VID
; i
++) {
832 if (adapter
->vlan_tag
[i
]) {
833 vtag
[ntags
] = cpu_to_le16(i
);
837 status
= be_cmd_vlan_config(adapter
, adapter
->if_handle
,
840 status
= be_cmd_vlan_config(adapter
, adapter
->if_handle
,
847 static int be_vlan_add_vid(struct net_device
*netdev
, u16 vid
)
849 struct be_adapter
*adapter
= netdev_priv(netdev
);
852 if (!be_physfn(adapter
)) {
857 adapter
->vlan_tag
[vid
] = 1;
858 if (adapter
->vlans_added
<= (adapter
->max_vlans
+ 1))
859 status
= be_vid_config(adapter
, false, 0);
862 adapter
->vlans_added
++;
864 adapter
->vlan_tag
[vid
] = 0;
869 static int be_vlan_rem_vid(struct net_device
*netdev
, u16 vid
)
871 struct be_adapter
*adapter
= netdev_priv(netdev
);
874 if (!be_physfn(adapter
)) {
879 adapter
->vlan_tag
[vid
] = 0;
880 if (adapter
->vlans_added
<= adapter
->max_vlans
)
881 status
= be_vid_config(adapter
, false, 0);
884 adapter
->vlans_added
--;
886 adapter
->vlan_tag
[vid
] = 1;
891 static void be_set_rx_mode(struct net_device
*netdev
)
893 struct be_adapter
*adapter
= netdev_priv(netdev
);
895 if (netdev
->flags
& IFF_PROMISC
) {
896 be_cmd_rx_filter(adapter
, IFF_PROMISC
, ON
);
897 adapter
->promiscuous
= true;
901 /* BE was previously in promiscuous mode; disable it */
902 if (adapter
->promiscuous
) {
903 adapter
->promiscuous
= false;
904 be_cmd_rx_filter(adapter
, IFF_PROMISC
, OFF
);
906 if (adapter
->vlans_added
)
907 be_vid_config(adapter
, false, 0);
910 /* Enable multicast promisc if num configured exceeds what we support */
911 if (netdev
->flags
& IFF_ALLMULTI
||
912 netdev_mc_count(netdev
) > BE_MAX_MC
) {
913 be_cmd_rx_filter(adapter
, IFF_ALLMULTI
, ON
);
917 if (netdev_uc_count(netdev
) != adapter
->uc_macs
) {
918 struct netdev_hw_addr
*ha
;
919 int i
= 1; /* First slot is claimed by the Primary MAC */
921 for (; adapter
->uc_macs
> 0; adapter
->uc_macs
--, i
++) {
922 be_cmd_pmac_del(adapter
, adapter
->if_handle
,
923 adapter
->pmac_id
[i
], 0);
926 if (netdev_uc_count(netdev
) > adapter
->max_pmac_cnt
) {
927 be_cmd_rx_filter(adapter
, IFF_PROMISC
, ON
);
928 adapter
->promiscuous
= true;
932 netdev_for_each_uc_addr(ha
, adapter
->netdev
) {
933 adapter
->uc_macs
++; /* First slot is for Primary MAC */
934 be_cmd_pmac_add(adapter
, (u8
*)ha
->addr
,
936 &adapter
->pmac_id
[adapter
->uc_macs
], 0);
940 be_cmd_rx_filter(adapter
, IFF_MULTICAST
, ON
);
945 static int be_set_vf_mac(struct net_device
*netdev
, int vf
, u8
*mac
)
947 struct be_adapter
*adapter
= netdev_priv(netdev
);
948 struct be_vf_cfg
*vf_cfg
= &adapter
->vf_cfg
[vf
];
951 if (!sriov_enabled(adapter
))
954 if (!is_valid_ether_addr(mac
) || vf
>= adapter
->num_vfs
)
957 if (lancer_chip(adapter
)) {
958 status
= be_cmd_set_mac_list(adapter
, mac
, 1, vf
+ 1);
960 status
= be_cmd_pmac_del(adapter
, vf_cfg
->if_handle
,
961 vf_cfg
->pmac_id
, vf
+ 1);
963 status
= be_cmd_pmac_add(adapter
, mac
, vf_cfg
->if_handle
,
964 &vf_cfg
->pmac_id
, vf
+ 1);
968 dev_err(&adapter
->pdev
->dev
, "MAC %pM set on VF %d Failed\n",
971 memcpy(vf_cfg
->mac_addr
, mac
, ETH_ALEN
);
976 static int be_get_vf_config(struct net_device
*netdev
, int vf
,
977 struct ifla_vf_info
*vi
)
979 struct be_adapter
*adapter
= netdev_priv(netdev
);
980 struct be_vf_cfg
*vf_cfg
= &adapter
->vf_cfg
[vf
];
982 if (!sriov_enabled(adapter
))
985 if (vf
>= adapter
->num_vfs
)
989 vi
->tx_rate
= vf_cfg
->tx_rate
;
990 vi
->vlan
= vf_cfg
->vlan_tag
;
992 memcpy(&vi
->mac
, vf_cfg
->mac_addr
, ETH_ALEN
);
997 static int be_set_vf_vlan(struct net_device
*netdev
,
998 int vf
, u16 vlan
, u8 qos
)
1000 struct be_adapter
*adapter
= netdev_priv(netdev
);
1003 if (!sriov_enabled(adapter
))
1006 if (vf
>= adapter
->num_vfs
|| vlan
> 4095)
1010 if (adapter
->vf_cfg
[vf
].vlan_tag
!= vlan
) {
1011 /* If this is new value, program it. Else skip. */
1012 adapter
->vf_cfg
[vf
].vlan_tag
= vlan
;
1014 status
= be_cmd_set_hsw_config(adapter
, vlan
,
1015 vf
+ 1, adapter
->vf_cfg
[vf
].if_handle
);
1018 /* Reset Transparent Vlan Tagging. */
1019 adapter
->vf_cfg
[vf
].vlan_tag
= 0;
1020 vlan
= adapter
->vf_cfg
[vf
].def_vid
;
1021 status
= be_cmd_set_hsw_config(adapter
, vlan
, vf
+ 1,
1022 adapter
->vf_cfg
[vf
].if_handle
);
1027 dev_info(&adapter
->pdev
->dev
,
1028 "VLAN %d config on VF %d failed\n", vlan
, vf
);
1032 static int be_set_vf_tx_rate(struct net_device
*netdev
,
1035 struct be_adapter
*adapter
= netdev_priv(netdev
);
1038 if (!sriov_enabled(adapter
))
1041 if (vf
>= adapter
->num_vfs
)
1044 if (rate
< 100 || rate
> 10000) {
1045 dev_err(&adapter
->pdev
->dev
,
1046 "tx rate must be between 100 and 10000 Mbps\n");
1050 status
= be_cmd_set_qos(adapter
, rate
/ 10, vf
+ 1);
1053 dev_err(&adapter
->pdev
->dev
,
1054 "tx rate %d on VF %d failed\n", rate
, vf
);
1056 adapter
->vf_cfg
[vf
].tx_rate
= rate
;
1060 static void be_eqd_update(struct be_adapter
*adapter
, struct be_eq_obj
*eqo
)
1062 struct be_rx_stats
*stats
= rx_stats(&adapter
->rx_obj
[eqo
->idx
]);
1063 ulong now
= jiffies
;
1064 ulong delta
= now
- stats
->rx_jiffies
;
1066 unsigned int start
, eqd
;
1068 if (!eqo
->enable_aic
) {
1073 if (eqo
->idx
>= adapter
->num_rx_qs
)
1076 stats
= rx_stats(&adapter
->rx_obj
[eqo
->idx
]);
1078 /* Wrapped around */
1079 if (time_before(now
, stats
->rx_jiffies
)) {
1080 stats
->rx_jiffies
= now
;
1084 /* Update once a second */
1089 start
= u64_stats_fetch_begin_bh(&stats
->sync
);
1090 pkts
= stats
->rx_pkts
;
1091 } while (u64_stats_fetch_retry_bh(&stats
->sync
, start
));
1093 stats
->rx_pps
= (unsigned long)(pkts
- stats
->rx_pkts_prev
) / (delta
/ HZ
);
1094 stats
->rx_pkts_prev
= pkts
;
1095 stats
->rx_jiffies
= now
;
1096 eqd
= (stats
->rx_pps
/ 110000) << 3;
1097 eqd
= min(eqd
, eqo
->max_eqd
);
1098 eqd
= max(eqd
, eqo
->min_eqd
);
1103 if (eqd
!= eqo
->cur_eqd
) {
1104 be_cmd_modify_eqd(adapter
, eqo
->q
.id
, eqd
);
1109 static void be_rx_stats_update(struct be_rx_obj
*rxo
,
1110 struct be_rx_compl_info
*rxcp
)
1112 struct be_rx_stats
*stats
= rx_stats(rxo
);
1114 u64_stats_update_begin(&stats
->sync
);
1116 stats
->rx_bytes
+= rxcp
->pkt_size
;
1118 if (rxcp
->pkt_type
== BE_MULTICAST_PACKET
)
1119 stats
->rx_mcast_pkts
++;
1121 stats
->rx_compl_err
++;
1122 u64_stats_update_end(&stats
->sync
);
1125 static inline bool csum_passed(struct be_rx_compl_info
*rxcp
)
1127 /* L4 checksum is not reliable for non TCP/UDP packets.
1128 * Also ignore ipcksm for ipv6 pkts */
1129 return (rxcp
->tcpf
|| rxcp
->udpf
) && rxcp
->l4_csum
&&
1130 (rxcp
->ip_csum
|| rxcp
->ipv6
);
1133 static struct be_rx_page_info
*get_rx_page_info(struct be_rx_obj
*rxo
,
1136 struct be_adapter
*adapter
= rxo
->adapter
;
1137 struct be_rx_page_info
*rx_page_info
;
1138 struct be_queue_info
*rxq
= &rxo
->q
;
1140 rx_page_info
= &rxo
->page_info_tbl
[frag_idx
];
1141 BUG_ON(!rx_page_info
->page
);
1143 if (rx_page_info
->last_page_user
) {
1144 dma_unmap_page(&adapter
->pdev
->dev
,
1145 dma_unmap_addr(rx_page_info
, bus
),
1146 adapter
->big_page_size
, DMA_FROM_DEVICE
);
1147 rx_page_info
->last_page_user
= false;
1150 atomic_dec(&rxq
->used
);
1151 return rx_page_info
;
1154 /* Throwaway the data in the Rx completion */
1155 static void be_rx_compl_discard(struct be_rx_obj
*rxo
,
1156 struct be_rx_compl_info
*rxcp
)
1158 struct be_queue_info
*rxq
= &rxo
->q
;
1159 struct be_rx_page_info
*page_info
;
1160 u16 i
, num_rcvd
= rxcp
->num_rcvd
;
1162 for (i
= 0; i
< num_rcvd
; i
++) {
1163 page_info
= get_rx_page_info(rxo
, rxcp
->rxq_idx
);
1164 put_page(page_info
->page
);
1165 memset(page_info
, 0, sizeof(*page_info
));
1166 index_inc(&rxcp
->rxq_idx
, rxq
->len
);
1171 * skb_fill_rx_data forms a complete skb for an ether frame
1172 * indicated by rxcp.
1174 static void skb_fill_rx_data(struct be_rx_obj
*rxo
, struct sk_buff
*skb
,
1175 struct be_rx_compl_info
*rxcp
)
1177 struct be_queue_info
*rxq
= &rxo
->q
;
1178 struct be_rx_page_info
*page_info
;
1180 u16 hdr_len
, curr_frag_len
, remaining
;
1183 page_info
= get_rx_page_info(rxo
, rxcp
->rxq_idx
);
1184 start
= page_address(page_info
->page
) + page_info
->page_offset
;
1187 /* Copy data in the first descriptor of this completion */
1188 curr_frag_len
= min(rxcp
->pkt_size
, rx_frag_size
);
1190 /* Copy the header portion into skb_data */
1191 hdr_len
= min(BE_HDR_LEN
, curr_frag_len
);
1192 memcpy(skb
->data
, start
, hdr_len
);
1193 skb
->len
= curr_frag_len
;
1194 if (curr_frag_len
<= BE_HDR_LEN
) { /* tiny packet */
1195 /* Complete packet has now been moved to data */
1196 put_page(page_info
->page
);
1198 skb
->tail
+= curr_frag_len
;
1200 skb_shinfo(skb
)->nr_frags
= 1;
1201 skb_frag_set_page(skb
, 0, page_info
->page
);
1202 skb_shinfo(skb
)->frags
[0].page_offset
=
1203 page_info
->page_offset
+ hdr_len
;
1204 skb_frag_size_set(&skb_shinfo(skb
)->frags
[0], curr_frag_len
- hdr_len
);
1205 skb
->data_len
= curr_frag_len
- hdr_len
;
1206 skb
->truesize
+= rx_frag_size
;
1207 skb
->tail
+= hdr_len
;
1209 page_info
->page
= NULL
;
1211 if (rxcp
->pkt_size
<= rx_frag_size
) {
1212 BUG_ON(rxcp
->num_rcvd
!= 1);
1216 /* More frags present for this completion */
1217 index_inc(&rxcp
->rxq_idx
, rxq
->len
);
1218 remaining
= rxcp
->pkt_size
- curr_frag_len
;
1219 for (i
= 1, j
= 0; i
< rxcp
->num_rcvd
; i
++) {
1220 page_info
= get_rx_page_info(rxo
, rxcp
->rxq_idx
);
1221 curr_frag_len
= min(remaining
, rx_frag_size
);
1223 /* Coalesce all frags from the same physical page in one slot */
1224 if (page_info
->page_offset
== 0) {
1227 skb_frag_set_page(skb
, j
, page_info
->page
);
1228 skb_shinfo(skb
)->frags
[j
].page_offset
=
1229 page_info
->page_offset
;
1230 skb_frag_size_set(&skb_shinfo(skb
)->frags
[j
], 0);
1231 skb_shinfo(skb
)->nr_frags
++;
1233 put_page(page_info
->page
);
1236 skb_frag_size_add(&skb_shinfo(skb
)->frags
[j
], curr_frag_len
);
1237 skb
->len
+= curr_frag_len
;
1238 skb
->data_len
+= curr_frag_len
;
1239 skb
->truesize
+= rx_frag_size
;
1240 remaining
-= curr_frag_len
;
1241 index_inc(&rxcp
->rxq_idx
, rxq
->len
);
1242 page_info
->page
= NULL
;
1244 BUG_ON(j
> MAX_SKB_FRAGS
);
1247 /* Process the RX completion indicated by rxcp when GRO is disabled */
1248 static void be_rx_compl_process(struct be_rx_obj
*rxo
,
1249 struct be_rx_compl_info
*rxcp
)
1251 struct be_adapter
*adapter
= rxo
->adapter
;
1252 struct net_device
*netdev
= adapter
->netdev
;
1253 struct sk_buff
*skb
;
1255 skb
= netdev_alloc_skb_ip_align(netdev
, BE_RX_SKB_ALLOC_SIZE
);
1256 if (unlikely(!skb
)) {
1257 rx_stats(rxo
)->rx_drops_no_skbs
++;
1258 be_rx_compl_discard(rxo
, rxcp
);
1262 skb_fill_rx_data(rxo
, skb
, rxcp
);
1264 if (likely((netdev
->features
& NETIF_F_RXCSUM
) && csum_passed(rxcp
)))
1265 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1267 skb_checksum_none_assert(skb
);
1269 skb
->protocol
= eth_type_trans(skb
, netdev
);
1270 if (netdev
->features
& NETIF_F_RXHASH
)
1271 skb
->rxhash
= rxcp
->rss_hash
;
1275 __vlan_hwaccel_put_tag(skb
, rxcp
->vlan_tag
);
1277 netif_receive_skb(skb
);
1280 /* Process the RX completion indicated by rxcp when GRO is enabled */
1281 void be_rx_compl_process_gro(struct be_rx_obj
*rxo
, struct napi_struct
*napi
,
1282 struct be_rx_compl_info
*rxcp
)
1284 struct be_adapter
*adapter
= rxo
->adapter
;
1285 struct be_rx_page_info
*page_info
;
1286 struct sk_buff
*skb
= NULL
;
1287 struct be_queue_info
*rxq
= &rxo
->q
;
1288 u16 remaining
, curr_frag_len
;
1291 skb
= napi_get_frags(napi
);
1293 be_rx_compl_discard(rxo
, rxcp
);
1297 remaining
= rxcp
->pkt_size
;
1298 for (i
= 0, j
= -1; i
< rxcp
->num_rcvd
; i
++) {
1299 page_info
= get_rx_page_info(rxo
, rxcp
->rxq_idx
);
1301 curr_frag_len
= min(remaining
, rx_frag_size
);
1303 /* Coalesce all frags from the same physical page in one slot */
1304 if (i
== 0 || page_info
->page_offset
== 0) {
1305 /* First frag or Fresh page */
1307 skb_frag_set_page(skb
, j
, page_info
->page
);
1308 skb_shinfo(skb
)->frags
[j
].page_offset
=
1309 page_info
->page_offset
;
1310 skb_frag_size_set(&skb_shinfo(skb
)->frags
[j
], 0);
1312 put_page(page_info
->page
);
1314 skb_frag_size_add(&skb_shinfo(skb
)->frags
[j
], curr_frag_len
);
1315 skb
->truesize
+= rx_frag_size
;
1316 remaining
-= curr_frag_len
;
1317 index_inc(&rxcp
->rxq_idx
, rxq
->len
);
1318 memset(page_info
, 0, sizeof(*page_info
));
1320 BUG_ON(j
> MAX_SKB_FRAGS
);
1322 skb_shinfo(skb
)->nr_frags
= j
+ 1;
1323 skb
->len
= rxcp
->pkt_size
;
1324 skb
->data_len
= rxcp
->pkt_size
;
1325 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1326 if (adapter
->netdev
->features
& NETIF_F_RXHASH
)
1327 skb
->rxhash
= rxcp
->rss_hash
;
1330 __vlan_hwaccel_put_tag(skb
, rxcp
->vlan_tag
);
1332 napi_gro_frags(napi
);
1335 static void be_parse_rx_compl_v1(struct be_eth_rx_compl
*compl,
1336 struct be_rx_compl_info
*rxcp
)
1339 AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, pktsize
, compl);
1340 rxcp
->vlanf
= AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, vtp
, compl);
1341 rxcp
->err
= AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, err
, compl);
1342 rxcp
->tcpf
= AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, tcpf
, compl);
1343 rxcp
->udpf
= AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, udpf
, compl);
1345 AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, ipcksm
, compl);
1347 AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, l4_cksm
, compl);
1349 AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, ip_version
, compl);
1351 AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, fragndx
, compl);
1353 AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, numfrags
, compl);
1355 AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, cast_enc
, compl);
1357 AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, rsshash
, rxcp
);
1359 rxcp
->vtm
= AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, vtm
,
1361 rxcp
->vlan_tag
= AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, vlan_tag
,
1364 rxcp
->port
= AMAP_GET_BITS(struct amap_eth_rx_compl_v1
, port
, compl);
1367 static void be_parse_rx_compl_v0(struct be_eth_rx_compl
*compl,
1368 struct be_rx_compl_info
*rxcp
)
1371 AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, pktsize
, compl);
1372 rxcp
->vlanf
= AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, vtp
, compl);
1373 rxcp
->err
= AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, err
, compl);
1374 rxcp
->tcpf
= AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, tcpf
, compl);
1375 rxcp
->udpf
= AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, udpf
, compl);
1377 AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, ipcksm
, compl);
1379 AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, l4_cksm
, compl);
1381 AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, ip_version
, compl);
1383 AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, fragndx
, compl);
1385 AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, numfrags
, compl);
1387 AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, cast_enc
, compl);
1389 AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, rsshash
, rxcp
);
1391 rxcp
->vtm
= AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, vtm
,
1393 rxcp
->vlan_tag
= AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, vlan_tag
,
1396 rxcp
->port
= AMAP_GET_BITS(struct amap_eth_rx_compl_v0
, port
, compl);
1399 static struct be_rx_compl_info
*be_rx_compl_get(struct be_rx_obj
*rxo
)
1401 struct be_eth_rx_compl
*compl = queue_tail_node(&rxo
->cq
);
1402 struct be_rx_compl_info
*rxcp
= &rxo
->rxcp
;
1403 struct be_adapter
*adapter
= rxo
->adapter
;
1405 /* For checking the valid bit it is Ok to use either definition as the
1406 * valid bit is at the same position in both v0 and v1 Rx compl */
1407 if (compl->dw
[offsetof(struct amap_eth_rx_compl_v1
, valid
) / 32] == 0)
1411 be_dws_le_to_cpu(compl, sizeof(*compl));
1413 if (adapter
->be3_native
)
1414 be_parse_rx_compl_v1(compl, rxcp
);
1416 be_parse_rx_compl_v0(compl, rxcp
);
1419 /* vlanf could be wrongly set in some cards.
1420 * ignore if vtm is not set */
1421 if ((adapter
->function_mode
& FLEX10_MODE
) && !rxcp
->vtm
)
1424 if (!lancer_chip(adapter
))
1425 rxcp
->vlan_tag
= swab16(rxcp
->vlan_tag
);
1427 if (adapter
->pvid
== (rxcp
->vlan_tag
& VLAN_VID_MASK
) &&
1428 !adapter
->vlan_tag
[rxcp
->vlan_tag
])
1432 /* As the compl has been parsed, reset it; we wont touch it again */
1433 compl->dw
[offsetof(struct amap_eth_rx_compl_v1
, valid
) / 32] = 0;
1435 queue_tail_inc(&rxo
->cq
);
1439 static inline struct page
*be_alloc_pages(u32 size
, gfp_t gfp
)
1441 u32 order
= get_order(size
);
1445 return alloc_pages(gfp
, order
);
1449 * Allocate a page, split it to fragments of size rx_frag_size and post as
1450 * receive buffers to BE
1452 static void be_post_rx_frags(struct be_rx_obj
*rxo
, gfp_t gfp
)
1454 struct be_adapter
*adapter
= rxo
->adapter
;
1455 struct be_rx_page_info
*page_info
= NULL
, *prev_page_info
= NULL
;
1456 struct be_queue_info
*rxq
= &rxo
->q
;
1457 struct page
*pagep
= NULL
;
1458 struct be_eth_rx_d
*rxd
;
1459 u64 page_dmaaddr
= 0, frag_dmaaddr
;
1460 u32 posted
, page_offset
= 0;
1462 page_info
= &rxo
->page_info_tbl
[rxq
->head
];
1463 for (posted
= 0; posted
< MAX_RX_POST
&& !page_info
->page
; posted
++) {
1465 pagep
= be_alloc_pages(adapter
->big_page_size
, gfp
);
1466 if (unlikely(!pagep
)) {
1467 rx_stats(rxo
)->rx_post_fail
++;
1470 page_dmaaddr
= dma_map_page(&adapter
->pdev
->dev
, pagep
,
1471 0, adapter
->big_page_size
,
1473 page_info
->page_offset
= 0;
1476 page_info
->page_offset
= page_offset
+ rx_frag_size
;
1478 page_offset
= page_info
->page_offset
;
1479 page_info
->page
= pagep
;
1480 dma_unmap_addr_set(page_info
, bus
, page_dmaaddr
);
1481 frag_dmaaddr
= page_dmaaddr
+ page_info
->page_offset
;
1483 rxd
= queue_head_node(rxq
);
1484 rxd
->fragpa_lo
= cpu_to_le32(frag_dmaaddr
& 0xFFFFFFFF);
1485 rxd
->fragpa_hi
= cpu_to_le32(upper_32_bits(frag_dmaaddr
));
1487 /* Any space left in the current big page for another frag? */
1488 if ((page_offset
+ rx_frag_size
+ rx_frag_size
) >
1489 adapter
->big_page_size
) {
1491 page_info
->last_page_user
= true;
1494 prev_page_info
= page_info
;
1495 queue_head_inc(rxq
);
1496 page_info
= &rxo
->page_info_tbl
[rxq
->head
];
1499 prev_page_info
->last_page_user
= true;
1502 atomic_add(posted
, &rxq
->used
);
1503 be_rxq_notify(adapter
, rxq
->id
, posted
);
1504 } else if (atomic_read(&rxq
->used
) == 0) {
1505 /* Let be_worker replenish when memory is available */
1506 rxo
->rx_post_starved
= true;
1510 static struct be_eth_tx_compl
*be_tx_compl_get(struct be_queue_info
*tx_cq
)
1512 struct be_eth_tx_compl
*txcp
= queue_tail_node(tx_cq
);
1514 if (txcp
->dw
[offsetof(struct amap_eth_tx_compl
, valid
) / 32] == 0)
1518 be_dws_le_to_cpu(txcp
, sizeof(*txcp
));
1520 txcp
->dw
[offsetof(struct amap_eth_tx_compl
, valid
) / 32] = 0;
1522 queue_tail_inc(tx_cq
);
1526 static u16
be_tx_compl_process(struct be_adapter
*adapter
,
1527 struct be_tx_obj
*txo
, u16 last_index
)
1529 struct be_queue_info
*txq
= &txo
->q
;
1530 struct be_eth_wrb
*wrb
;
1531 struct sk_buff
**sent_skbs
= txo
->sent_skb_list
;
1532 struct sk_buff
*sent_skb
;
1533 u16 cur_index
, num_wrbs
= 1; /* account for hdr wrb */
1534 bool unmap_skb_hdr
= true;
1536 sent_skb
= sent_skbs
[txq
->tail
];
1538 sent_skbs
[txq
->tail
] = NULL
;
1540 /* skip header wrb */
1541 queue_tail_inc(txq
);
1544 cur_index
= txq
->tail
;
1545 wrb
= queue_tail_node(txq
);
1546 unmap_tx_frag(&adapter
->pdev
->dev
, wrb
,
1547 (unmap_skb_hdr
&& skb_headlen(sent_skb
)));
1548 unmap_skb_hdr
= false;
1551 queue_tail_inc(txq
);
1552 } while (cur_index
!= last_index
);
1554 kfree_skb(sent_skb
);
1558 /* Return the number of events in the event queue */
1559 static inline int events_get(struct be_eq_obj
*eqo
)
1561 struct be_eq_entry
*eqe
;
1565 eqe
= queue_tail_node(&eqo
->q
);
1572 queue_tail_inc(&eqo
->q
);
1578 static int event_handle(struct be_eq_obj
*eqo
)
1581 int num
= events_get(eqo
);
1583 /* Deal with any spurious interrupts that come without events */
1587 be_eq_notify(eqo
->adapter
, eqo
->q
.id
, rearm
, true, num
);
1589 napi_schedule(&eqo
->napi
);
1594 /* Leaves the EQ is disarmed state */
1595 static void be_eq_clean(struct be_eq_obj
*eqo
)
1597 int num
= events_get(eqo
);
1599 be_eq_notify(eqo
->adapter
, eqo
->q
.id
, false, true, num
);
1602 static void be_rx_cq_clean(struct be_rx_obj
*rxo
)
1604 struct be_rx_page_info
*page_info
;
1605 struct be_queue_info
*rxq
= &rxo
->q
;
1606 struct be_queue_info
*rx_cq
= &rxo
->cq
;
1607 struct be_rx_compl_info
*rxcp
;
1610 /* First cleanup pending rx completions */
1611 while ((rxcp
= be_rx_compl_get(rxo
)) != NULL
) {
1612 be_rx_compl_discard(rxo
, rxcp
);
1613 be_cq_notify(rxo
->adapter
, rx_cq
->id
, false, 1);
1616 /* Then free posted rx buffer that were not used */
1617 tail
= (rxq
->head
+ rxq
->len
- atomic_read(&rxq
->used
)) % rxq
->len
;
1618 for (; atomic_read(&rxq
->used
) > 0; index_inc(&tail
, rxq
->len
)) {
1619 page_info
= get_rx_page_info(rxo
, tail
);
1620 put_page(page_info
->page
);
1621 memset(page_info
, 0, sizeof(*page_info
));
1623 BUG_ON(atomic_read(&rxq
->used
));
1624 rxq
->tail
= rxq
->head
= 0;
1627 static void be_tx_compl_clean(struct be_adapter
*adapter
)
1629 struct be_tx_obj
*txo
;
1630 struct be_queue_info
*txq
;
1631 struct be_eth_tx_compl
*txcp
;
1632 u16 end_idx
, cmpl
= 0, timeo
= 0, num_wrbs
= 0;
1633 struct sk_buff
*sent_skb
;
1635 int i
, pending_txqs
;
1637 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1639 pending_txqs
= adapter
->num_tx_qs
;
1641 for_all_tx_queues(adapter
, txo
, i
) {
1643 while ((txcp
= be_tx_compl_get(&txo
->cq
))) {
1645 AMAP_GET_BITS(struct amap_eth_tx_compl
,
1647 num_wrbs
+= be_tx_compl_process(adapter
, txo
,
1652 be_cq_notify(adapter
, txo
->cq
.id
, false, cmpl
);
1653 atomic_sub(num_wrbs
, &txq
->used
);
1657 if (atomic_read(&txq
->used
) == 0)
1661 if (pending_txqs
== 0 || ++timeo
> 200)
1667 for_all_tx_queues(adapter
, txo
, i
) {
1669 if (atomic_read(&txq
->used
))
1670 dev_err(&adapter
->pdev
->dev
, "%d pending tx-compls\n",
1671 atomic_read(&txq
->used
));
1673 /* free posted tx for which compls will never arrive */
1674 while (atomic_read(&txq
->used
)) {
1675 sent_skb
= txo
->sent_skb_list
[txq
->tail
];
1676 end_idx
= txq
->tail
;
1677 num_wrbs
= wrb_cnt_for_skb(adapter
, sent_skb
,
1679 index_adv(&end_idx
, num_wrbs
- 1, txq
->len
);
1680 num_wrbs
= be_tx_compl_process(adapter
, txo
, end_idx
);
1681 atomic_sub(num_wrbs
, &txq
->used
);
1686 static void be_evt_queues_destroy(struct be_adapter
*adapter
)
1688 struct be_eq_obj
*eqo
;
1691 for_all_evt_queues(adapter
, eqo
, i
) {
1694 be_cmd_q_destroy(adapter
, &eqo
->q
, QTYPE_EQ
);
1695 be_queue_free(adapter
, &eqo
->q
);
1699 static int be_evt_queues_create(struct be_adapter
*adapter
)
1701 struct be_queue_info
*eq
;
1702 struct be_eq_obj
*eqo
;
1705 adapter
->num_evt_qs
= num_irqs(adapter
);
1707 for_all_evt_queues(adapter
, eqo
, i
) {
1708 eqo
->adapter
= adapter
;
1709 eqo
->tx_budget
= BE_TX_BUDGET
;
1711 eqo
->max_eqd
= BE_MAX_EQD
;
1712 eqo
->enable_aic
= true;
1715 rc
= be_queue_alloc(adapter
, eq
, EVNT_Q_LEN
,
1716 sizeof(struct be_eq_entry
));
1720 rc
= be_cmd_eq_create(adapter
, eq
, eqo
->cur_eqd
);
1727 static void be_mcc_queues_destroy(struct be_adapter
*adapter
)
1729 struct be_queue_info
*q
;
1731 q
= &adapter
->mcc_obj
.q
;
1733 be_cmd_q_destroy(adapter
, q
, QTYPE_MCCQ
);
1734 be_queue_free(adapter
, q
);
1736 q
= &adapter
->mcc_obj
.cq
;
1738 be_cmd_q_destroy(adapter
, q
, QTYPE_CQ
);
1739 be_queue_free(adapter
, q
);
1742 /* Must be called only after TX qs are created as MCC shares TX EQ */
1743 static int be_mcc_queues_create(struct be_adapter
*adapter
)
1745 struct be_queue_info
*q
, *cq
;
1747 cq
= &adapter
->mcc_obj
.cq
;
1748 if (be_queue_alloc(adapter
, cq
, MCC_CQ_LEN
,
1749 sizeof(struct be_mcc_compl
)))
1752 /* Use the default EQ for MCC completions */
1753 if (be_cmd_cq_create(adapter
, cq
, &mcc_eqo(adapter
)->q
, true, 0))
1756 q
= &adapter
->mcc_obj
.q
;
1757 if (be_queue_alloc(adapter
, q
, MCC_Q_LEN
, sizeof(struct be_mcc_wrb
)))
1758 goto mcc_cq_destroy
;
1760 if (be_cmd_mccq_create(adapter
, q
, cq
))
1766 be_queue_free(adapter
, q
);
1768 be_cmd_q_destroy(adapter
, cq
, QTYPE_CQ
);
1770 be_queue_free(adapter
, cq
);
1775 static void be_tx_queues_destroy(struct be_adapter
*adapter
)
1777 struct be_queue_info
*q
;
1778 struct be_tx_obj
*txo
;
1781 for_all_tx_queues(adapter
, txo
, i
) {
1784 be_cmd_q_destroy(adapter
, q
, QTYPE_TXQ
);
1785 be_queue_free(adapter
, q
);
1789 be_cmd_q_destroy(adapter
, q
, QTYPE_CQ
);
1790 be_queue_free(adapter
, q
);
1794 static int be_num_txqs_want(struct be_adapter
*adapter
)
1796 if (sriov_enabled(adapter
) || be_is_mc(adapter
) ||
1797 lancer_chip(adapter
) || !be_physfn(adapter
) ||
1798 adapter
->generation
== BE_GEN2
)
1804 static int be_tx_cqs_create(struct be_adapter
*adapter
)
1806 struct be_queue_info
*cq
, *eq
;
1808 struct be_tx_obj
*txo
;
1811 adapter
->num_tx_qs
= be_num_txqs_want(adapter
);
1812 if (adapter
->num_tx_qs
!= MAX_TX_QS
) {
1814 netif_set_real_num_tx_queues(adapter
->netdev
,
1815 adapter
->num_tx_qs
);
1819 for_all_tx_queues(adapter
, txo
, i
) {
1821 status
= be_queue_alloc(adapter
, cq
, TX_CQ_LEN
,
1822 sizeof(struct be_eth_tx_compl
));
1826 /* If num_evt_qs is less than num_tx_qs, then more than
1827 * one txq share an eq
1829 eq
= &adapter
->eq_obj
[i
% adapter
->num_evt_qs
].q
;
1830 status
= be_cmd_cq_create(adapter
, cq
, eq
, false, 3);
1837 static int be_tx_qs_create(struct be_adapter
*adapter
)
1839 struct be_tx_obj
*txo
;
1842 for_all_tx_queues(adapter
, txo
, i
) {
1843 status
= be_queue_alloc(adapter
, &txo
->q
, TX_Q_LEN
,
1844 sizeof(struct be_eth_wrb
));
1848 status
= be_cmd_txq_create(adapter
, &txo
->q
, &txo
->cq
);
1856 static void be_rx_cqs_destroy(struct be_adapter
*adapter
)
1858 struct be_queue_info
*q
;
1859 struct be_rx_obj
*rxo
;
1862 for_all_rx_queues(adapter
, rxo
, i
) {
1865 be_cmd_q_destroy(adapter
, q
, QTYPE_CQ
);
1866 be_queue_free(adapter
, q
);
1870 static int be_rx_cqs_create(struct be_adapter
*adapter
)
1872 struct be_queue_info
*eq
, *cq
;
1873 struct be_rx_obj
*rxo
;
1876 /* We'll create as many RSS rings as there are irqs.
1877 * But when there's only one irq there's no use creating RSS rings
1879 adapter
->num_rx_qs
= (num_irqs(adapter
) > 1) ?
1880 num_irqs(adapter
) + 1 : 1;
1882 adapter
->big_page_size
= (1 << get_order(rx_frag_size
)) * PAGE_SIZE
;
1883 for_all_rx_queues(adapter
, rxo
, i
) {
1884 rxo
->adapter
= adapter
;
1886 rc
= be_queue_alloc(adapter
, cq
, RX_CQ_LEN
,
1887 sizeof(struct be_eth_rx_compl
));
1891 eq
= &adapter
->eq_obj
[i
% adapter
->num_evt_qs
].q
;
1892 rc
= be_cmd_cq_create(adapter
, cq
, eq
, false, 3);
1897 if (adapter
->num_rx_qs
!= MAX_RX_QS
)
1898 dev_info(&adapter
->pdev
->dev
,
1899 "Created only %d receive queues", adapter
->num_rx_qs
);
1904 static irqreturn_t
be_intx(int irq
, void *dev
)
1906 struct be_adapter
*adapter
= dev
;
1909 /* With INTx only one EQ is used */
1910 num_evts
= event_handle(&adapter
->eq_obj
[0]);
1917 static irqreturn_t
be_msix(int irq
, void *dev
)
1919 struct be_eq_obj
*eqo
= dev
;
1925 static inline bool do_gro(struct be_rx_compl_info
*rxcp
)
1927 return (rxcp
->tcpf
&& !rxcp
->err
) ? true : false;
1930 static int be_process_rx(struct be_rx_obj
*rxo
, struct napi_struct
*napi
,
1933 struct be_adapter
*adapter
= rxo
->adapter
;
1934 struct be_queue_info
*rx_cq
= &rxo
->cq
;
1935 struct be_rx_compl_info
*rxcp
;
1938 for (work_done
= 0; work_done
< budget
; work_done
++) {
1939 rxcp
= be_rx_compl_get(rxo
);
1943 /* Is it a flush compl that has no data */
1944 if (unlikely(rxcp
->num_rcvd
== 0))
1947 /* Discard compl with partial DMA Lancer B0 */
1948 if (unlikely(!rxcp
->pkt_size
)) {
1949 be_rx_compl_discard(rxo
, rxcp
);
1953 /* On BE drop pkts that arrive due to imperfect filtering in
1954 * promiscuous mode on some skews
1956 if (unlikely(rxcp
->port
!= adapter
->port_num
&&
1957 !lancer_chip(adapter
))) {
1958 be_rx_compl_discard(rxo
, rxcp
);
1963 be_rx_compl_process_gro(rxo
, napi
, rxcp
);
1965 be_rx_compl_process(rxo
, rxcp
);
1967 be_rx_stats_update(rxo
, rxcp
);
1971 be_cq_notify(adapter
, rx_cq
->id
, true, work_done
);
1973 if (atomic_read(&rxo
->q
.used
) < RX_FRAGS_REFILL_WM
)
1974 be_post_rx_frags(rxo
, GFP_ATOMIC
);
1980 static bool be_process_tx(struct be_adapter
*adapter
, struct be_tx_obj
*txo
,
1981 int budget
, int idx
)
1983 struct be_eth_tx_compl
*txcp
;
1984 int num_wrbs
= 0, work_done
;
1986 for (work_done
= 0; work_done
< budget
; work_done
++) {
1987 txcp
= be_tx_compl_get(&txo
->cq
);
1990 num_wrbs
+= be_tx_compl_process(adapter
, txo
,
1991 AMAP_GET_BITS(struct amap_eth_tx_compl
,
1996 be_cq_notify(adapter
, txo
->cq
.id
, true, work_done
);
1997 atomic_sub(num_wrbs
, &txo
->q
.used
);
1999 /* As Tx wrbs have been freed up, wake up netdev queue
2000 * if it was stopped due to lack of tx wrbs. */
2001 if (__netif_subqueue_stopped(adapter
->netdev
, idx
) &&
2002 atomic_read(&txo
->q
.used
) < txo
->q
.len
/ 2) {
2003 netif_wake_subqueue(adapter
->netdev
, idx
);
2006 u64_stats_update_begin(&tx_stats(txo
)->sync_compl
);
2007 tx_stats(txo
)->tx_compl
+= work_done
;
2008 u64_stats_update_end(&tx_stats(txo
)->sync_compl
);
2010 return (work_done
< budget
); /* Done */
2013 int be_poll(struct napi_struct
*napi
, int budget
)
2015 struct be_eq_obj
*eqo
= container_of(napi
, struct be_eq_obj
, napi
);
2016 struct be_adapter
*adapter
= eqo
->adapter
;
2017 int max_work
= 0, work
, i
;
2020 /* Process all TXQs serviced by this EQ */
2021 for (i
= eqo
->idx
; i
< adapter
->num_tx_qs
; i
+= adapter
->num_evt_qs
) {
2022 tx_done
= be_process_tx(adapter
, &adapter
->tx_obj
[i
],
2028 /* This loop will iterate twice for EQ0 in which
2029 * completions of the last RXQ (default one) are also processed
2030 * For other EQs the loop iterates only once
2032 for (i
= eqo
->idx
; i
< adapter
->num_rx_qs
; i
+= adapter
->num_evt_qs
) {
2033 work
= be_process_rx(&adapter
->rx_obj
[i
], napi
, budget
);
2034 max_work
= max(work
, max_work
);
2037 if (is_mcc_eqo(eqo
))
2038 be_process_mcc(adapter
);
2040 if (max_work
< budget
) {
2041 napi_complete(napi
);
2042 be_eq_notify(adapter
, eqo
->q
.id
, true, false, 0);
2044 /* As we'll continue in polling mode, count and clear events */
2045 be_eq_notify(adapter
, eqo
->q
.id
, false, false, events_get(eqo
));
2050 void be_detect_dump_ue(struct be_adapter
*adapter
)
2052 u32 ue_lo
= 0, ue_hi
= 0, ue_lo_mask
= 0, ue_hi_mask
= 0;
2053 u32 sliport_status
= 0, sliport_err1
= 0, sliport_err2
= 0;
2056 if (adapter
->eeh_err
|| adapter
->ue_detected
)
2059 if (lancer_chip(adapter
)) {
2060 sliport_status
= ioread32(adapter
->db
+ SLIPORT_STATUS_OFFSET
);
2061 if (sliport_status
& SLIPORT_STATUS_ERR_MASK
) {
2062 sliport_err1
= ioread32(adapter
->db
+
2063 SLIPORT_ERROR1_OFFSET
);
2064 sliport_err2
= ioread32(adapter
->db
+
2065 SLIPORT_ERROR2_OFFSET
);
2068 pci_read_config_dword(adapter
->pdev
,
2069 PCICFG_UE_STATUS_LOW
, &ue_lo
);
2070 pci_read_config_dword(adapter
->pdev
,
2071 PCICFG_UE_STATUS_HIGH
, &ue_hi
);
2072 pci_read_config_dword(adapter
->pdev
,
2073 PCICFG_UE_STATUS_LOW_MASK
, &ue_lo_mask
);
2074 pci_read_config_dword(adapter
->pdev
,
2075 PCICFG_UE_STATUS_HI_MASK
, &ue_hi_mask
);
2077 ue_lo
= (ue_lo
& (~ue_lo_mask
));
2078 ue_hi
= (ue_hi
& (~ue_hi_mask
));
2081 if (ue_lo
|| ue_hi
||
2082 sliport_status
& SLIPORT_STATUS_ERR_MASK
) {
2083 adapter
->ue_detected
= true;
2084 adapter
->eeh_err
= true;
2085 dev_err(&adapter
->pdev
->dev
,
2086 "Unrecoverable error in the card\n");
2090 for (i
= 0; ue_lo
; ue_lo
>>= 1, i
++) {
2092 dev_err(&adapter
->pdev
->dev
,
2093 "UE: %s bit set\n", ue_status_low_desc
[i
]);
2097 for (i
= 0; ue_hi
; ue_hi
>>= 1, i
++) {
2099 dev_err(&adapter
->pdev
->dev
,
2100 "UE: %s bit set\n", ue_status_hi_desc
[i
]);
2104 if (sliport_status
& SLIPORT_STATUS_ERR_MASK
) {
2105 dev_err(&adapter
->pdev
->dev
,
2106 "sliport status 0x%x\n", sliport_status
);
2107 dev_err(&adapter
->pdev
->dev
,
2108 "sliport error1 0x%x\n", sliport_err1
);
2109 dev_err(&adapter
->pdev
->dev
,
2110 "sliport error2 0x%x\n", sliport_err2
);
2114 static void be_msix_disable(struct be_adapter
*adapter
)
2116 if (msix_enabled(adapter
)) {
2117 pci_disable_msix(adapter
->pdev
);
2118 adapter
->num_msix_vec
= 0;
2122 static uint
be_num_rss_want(struct be_adapter
*adapter
)
2124 if ((adapter
->function_caps
& BE_FUNCTION_CAPS_RSS
) &&
2125 adapter
->num_vfs
== 0 && be_physfn(adapter
) &&
2127 return (adapter
->be3_native
) ? BE3_MAX_RSS_QS
: BE2_MAX_RSS_QS
;
2132 static void be_msix_enable(struct be_adapter
*adapter
)
2134 #define BE_MIN_MSIX_VECTORS 1
2135 int i
, status
, num_vec
;
2137 /* If RSS queues are not used, need a vec for default RX Q */
2138 num_vec
= min(be_num_rss_want(adapter
), num_online_cpus());
2139 num_vec
= max(num_vec
, BE_MIN_MSIX_VECTORS
);
2141 for (i
= 0; i
< num_vec
; i
++)
2142 adapter
->msix_entries
[i
].entry
= i
;
2144 status
= pci_enable_msix(adapter
->pdev
, adapter
->msix_entries
, num_vec
);
2147 } else if (status
>= BE_MIN_MSIX_VECTORS
) {
2149 if (pci_enable_msix(adapter
->pdev
, adapter
->msix_entries
,
2155 adapter
->num_msix_vec
= num_vec
;
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				     pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				 "Device supports %d VFs and not %d\n",
				 adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}

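/* Creates the RX rings: the default (non-RSS) ring first, as the FW expects,
 * then one ring per RSS queue. The 128-entry RSS indirection table is filled
 * by striping the rss_ids of the RSS rings across it; e.g. with 4 RSS rings
 * the table repeats the pattern rss_id[0..3] thirty-two times.
 */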
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

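/* Programs (or clears) the magic-packet WoL filter in the FW. Enabling also
 * sets the PM control bits in PCI config space and arms wake-up from
 * D3hot/D3cold; disabling passes an all-zero MAC to tear the filter down.
 */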
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}

static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	kfree(adapter->pmac_id);
	return 0;
}

static void be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
}

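/* PF-side provisioning of each VF: create an interface limited to untagged,
 * broadcast and multicast traffic, assign MAC addresses, then cache the link
 * speed and the default pvid that the FW reports for each VF.
 */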
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}

static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
					  &pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}

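/* Brings the function to a fully configured state: event and completion
 * queues first, then the interface and its MAC, TX rings, vlan and rx-mode
 * configuration, flow control, and finally SR-IOV VF setup before the
 * periodic worker is scheduled.
 */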
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /*permanent */, 0, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id[0], 0);
	if (status != 0)
		goto err;

	/* The VF's permanent mac queried from card is incorrect.
	 * For BEx: Query the mac configured by the PF using if_handle
	 * For Lancer: Get and use mac_list to obtain mac address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_add_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	/* For Lancer: It is legal for this cmd to fail on VF */
	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
		goto err;

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		/* For Lancer: It is legal for this cmd to fail on VF */
		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (sriov_enabled(adapter)) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;

	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);

	return;
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

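/* Flashes one UFI component at a time. The gen2/gen3 tables below map each
 * component type to its flash offset and maximum size. Every component is
 * written in 32KB chunks: intermediate chunks use a SAVE flash op, and the
 * final chunk a FLASH op, which commits the section to flash.
 */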
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

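/* Lancer flashing differs from BEx: the raw image is streamed to the "/prg"
 * object in 32KB write_object commands, and a final zero-length write at the
 * end offset commits the download.
 */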
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

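/* BAR layout differs by chip: Lancer exposes its doorbells in BAR 0 and
 * needs no CSR mapping, while on BEx the PF maps the CSR BAR (2) and the
 * doorbell BAR number depends on the generation and on PF vs VF.
 */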
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return ((adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter)) ? true : false;
}

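/* Pulls the FW-side configuration into the adapter: port number, function
 * mode/caps, vlan and pmac limits (the pmac_id table gets one extra slot for
 * the primary MAC), controller attributes, and WoL capability with the
 * exclusion list as fallback when the ACPI WoL query fails.
 */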
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

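/* Polls the SLIPORT status register once a second for up to
 * SLIPORT_READY_TIMEOUT (30) seconds, waiting for the FW ready bit.
 */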
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

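/* PCI probe: enables the device, maps BARs, syncs with the FW POST/init
 * state, resets the function, then sets up stats, config and queues before
 * registering the netdev. Clearing INTR before be_stats_init() covers the
 * case where a crashed kernel (kdump) left the bit set.
 */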
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.probe = be_probe,
	.id_table = be_dev_ids,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);