/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */
#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	/* per-bit error names elided */
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	/* per-bit error names elided */
};
/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
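
/*
 * Doorbell helpers: each queue type has a doorbell register in the
 * adapter's mapped BAR (adapter->db). The low bits of the 32-bit value
 * select the ring id and the remaining fields carry the number of
 * entries posted (or completions popped) plus rearm/clear flags, so a
 * single iowrite32() both reports progress and re-arms the queue.
 */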
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				       MAC_ADDRESS_TYPE_NETWORK, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
					 adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
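
/*
 * The erx HW counter accumulated below is only 16 bits wide, so the
 * driver widens it: if the new 16-bit reading is smaller than the low
 * half of the 32-bit accumulator, the counter must have wrapped (e.g.
 * 0xFFF0 followed by 0x0005 means 21 more events, not a decrease), and
 * a carry of 65536 is added.
 */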
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
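
/*
 * Per-queue counters are updated under a u64_stats seqcount on the hot
 * path; the fetch_begin/fetch_retry loops below re-read a snapshot
 * until a consistent 64-bit pair is observed, which matters on 32-bit
 * hosts where a u64 load is not atomic.
 */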
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
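
/*
 * BE2/BE3 expect each TX request to occupy an even number of WRB slots,
 * so when the header WRB plus fragment WRBs come out odd, a dummy WRB
 * is appended (the Lancer check below shows it has no such
 * restriction).
 */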
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
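
/*
 * On a DMA mapping failure part-way through building a request, the
 * function below rewinds txq->head to where it started and walks the
 * WRBs it had already filled, unmapping each one, so no stale DMA
 * mappings are leaked and the ring is left untouched.
 */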
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
be_xmit(struct sk_buff
*skb
,
701 struct net_device
*netdev
)
703 struct be_adapter
*adapter
= netdev_priv(netdev
);
704 struct be_tx_obj
*txo
= &adapter
->tx_obj
[skb_get_queue_mapping(skb
)];
705 struct be_queue_info
*txq
= &txo
->q
;
706 u32 wrb_cnt
= 0, copied
= 0;
707 u32 start
= txq
->head
;
708 bool dummy_wrb
, stopped
= false;
710 /* For vlan tagged pkts, BE
711 * 1) calculates checksum even when CSO is not requested
712 * 2) calculates checksum wrongly for padded pkt less than
714 * As a workaround disable TX vlan offloading in such cases.
716 if (unlikely(vlan_tx_tag_present(skb
) &&
717 (skb
->ip_summed
!= CHECKSUM_PARTIAL
|| skb
->len
<= 60))) {
718 skb
= skb_share_check(skb
, GFP_ATOMIC
);
722 skb
= __vlan_put_tag(skb
, be_get_tx_vlan_tag(adapter
, skb
));
729 wrb_cnt
= wrb_cnt_for_skb(adapter
, skb
, &dummy_wrb
);
731 copied
= make_tx_wrbs(adapter
, txq
, skb
, wrb_cnt
, dummy_wrb
);
733 /* record the sent skb in the sent_skb table */
734 BUG_ON(txo
->sent_skb_list
[start
]);
735 txo
->sent_skb_list
[start
] = skb
;
737 /* Ensure txq has space for the next skb; Else stop the queue
738 * *BEFORE* ringing the tx doorbell, so that we serialze the
739 * tx compls of the current transmit which'll wake up the queue
741 atomic_add(wrb_cnt
, &txq
->used
);
742 if ((BE_MAX_TX_FRAG_COUNT
+ atomic_read(&txq
->used
)) >=
744 netif_stop_subqueue(netdev
, skb_get_queue_mapping(skb
));
748 be_txq_notify(adapter
, txq
->id
, wrb_cnt
);
750 be_tx_stats_update(txo
, wrb_cnt
, copied
,
751 skb_shinfo(skb
)->gso_segs
, stopped
);
754 dev_kfree_skb_any(skb
);
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					    vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					    NULL, 0, 1, 1);
	}

	return status;
}
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}
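
/*
 * Adaptive interrupt coalescing: once per second the RX packet rate is
 * sampled and the event-queue delay (eqd) is scaled with it, then
 * clamped to [min_eqd, max_eqd]; the firmware is told the new value
 * only when it actually changes, avoiding needless mailbox traffic.
 */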
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
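
/*
 * BE3-native mode uses the v1 RX completion layout while older/compat
 * modes use v0; both parsers below fill the same driver-level
 * be_rx_compl_info, so the rest of the RX path stays format-agnostic.
 */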
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
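
/*
 * RX buffers are carved out of "big pages": one compound page of order
 * get_order(rx_frag_size) is DMA-mapped once and handed to the HW as
 * several rx_frag_size-sized fragments. The last fragment cut from a
 * page is marked last_page_user, so the page is unmapped only after
 * every fragment on it has been consumed.
 */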
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
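
/*
 * A TX completion carries only the index of the last WRB of a request;
 * the routine below starts at the saved skb (which sits at the header
 * WRB's slot), skips the header WRB and walks forward unmapping each
 * fragment WRB until that last index is reached.
 */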
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter) || be_is_mc(adapter) ||
		lancer_chip(adapter) || !be_physfn(adapter) ||
		adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !sriov_enabled(adapter) && be_physfn(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}
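
/*
 * Legacy INTx path: on BE the CEV ISR register identifies which event
 * queues fired, so only those are serviced; Lancer has no such
 * register, so each EQ is peeked directly instead.
 */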
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}
static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl)
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);

	napi_complete(napi);

	/* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
	if (lancer_chip(adapter) && !msix_enabled(adapter)) {
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, true, 0);

		be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
	}

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}
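
/*
 * MSI-X setup: one vector is requested per desired RX queue plus one
 * shared TX/MCC vector; if pci_enable_msix() reports only a smaller
 * count is available, the request is retried with that count as long
 * as it still covers the two-vector minimum.
 */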
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
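
/* RX queue setup below also programs the 128-entry RSS indirection table by
 * striping the RSS-capable queues across it. For example, with three RSS
 * queues whose ids are A, B and C, the table becomes A B C A B C ... for
 * all 128 slots, so hashed flows spread evenly across the queues. Queue 0
 * is the default (non-RSS) queue and is excluded from the table.
 */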

static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) < 128)
					rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	u8 link_status;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				GFP_KERNEL);
	if (cmd.va == NULL)
		return -ENOMEM;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						vf_cfg->if_handle,
						&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}

static int be_clear(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static void be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}

static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK,
			false, adapter->if_handle, pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			&adapter->pmac_id, 0);
do_none:
	return status;
}
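
/* be_setup() is the single bring-up path used by probe, resume and error
 * recovery. Order matters: TX/RX/MCC queues are created first, the
 * permanent MAC is queried and programmed, the interface is created (with
 * RSS capability when the function supports it), TX queues are created in
 * hardware, VLAN/RX-mode/flow-control state is restored, and finally VFs
 * are configured when SR-IOV is enabled. Any failure unwinds through
 * be_clear().
 */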

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status, i;
	u8 mac[ETH_ALEN];
	struct be_tx_obj *txo;

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /*permanent */, 0, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	for_all_tx_queues(adapter, txo, i) {
		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			goto err;
	}

	/* The VF's permanent mac queried from card is incorrect.
	 * For BEx: Query the mac configured by the PF using if_handle
	 * For Lancer: Get and use mac_list to obtain mac address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_configure_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	/* For Lancer: It is legal for this cmd to fail on VF */
	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
		goto err;

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		/* For Lancer: It is legal for this cmd to fail on VF */
		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (sriov_enabled(adapter)) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	event_handle(adapter, &adapter->tx_eq, false);
	for_all_rx_queues(adapter, rxo, i)
		event_handle(adapter, &rxo->rx_eq, true);
}
#endif
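
/* Redboot (boot ROM) sections are flashed conditionally: the last 4 bytes
 * of the image in the UFI file hold its CRC, which is compared against the
 * CRC read back from flash; matching CRCs mean the section is skipped.
 */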

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);
}
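
/* The MCC mailbox used for the bootstrap command channel must be 16-byte
 * aligned. The allocation is padded by 16 bytes and both the virtual and
 * DMA addresses are rounded up with PTR_ALIGN, so adapter->mbox_mem points
 * into the raw adapter->mbox_mem_alloced buffer.
 */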

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
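
/* Lancer readiness: poll the RDY bit in SLIPORT_STATUS, sleeping between
 * reads, for up to SLIPORT_READY_TIMEOUT iterations before giving up.
 */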

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
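
/* Probe sequence: enable the PCI device and claim its regions, allocate the
 * multi-queue netdev, identify the chip generation, set a 64-bit DMA mask
 * (falling back to 32-bit), enable SR-IOV, map BARs and set up the mailbox,
 * sync with firmware POST state, reset the function, then size MSI-X and
 * run be_setup() before registering the netdev. The error path unwinds in
 * reverse order.
 */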

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);