/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	/* ... */
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	/* ... */
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

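/* Note: both helpers above return a void pointer on purpose: the callers
 * (be_parse_stats() and the populate_*() routines below) perform the same
 * BE2/BE3/else chip check, so each cast back to a concrete v0/v1/v2 layout
 * is paired with the branch that selected it.
 */
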
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

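/* Example: if *acc == 0x0001fffe and the HW now reports val == 0x0003,
 * then val < lo(*acc) detects a 16-bit wrap and
 * newacc = 0x00010000 + 0x0003 + 65536 = 0x00020003: the low half mirrors
 * the HW counter, the high half counts roll-overs.
 */
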
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	u32 erx_stat;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

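/* Note: the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pairs
 * above re-read a queue's counters until the writer has not bumped the
 * seqcount mid-read, so the 64-bit pkts/bytes snapshots stay consistent
 * even on 32-bit hosts where such loads are not atomic.
 */
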
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

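/* Example: an skb with linear data plus 3 page frags needs
 * 1 (head) + 3 (frags) + 1 (hdr wrb) = 5 entries; on BEx the odd count
 * gets a dummy WRB appended, so 6 TX-ring slots are consumed.
 */
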
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

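/* Example: a DMA address of 0x1_2345_6789 is carried as
 * frag_pa_hi = 0x1 and frag_pa_lo = 0x23456789; the HW reassembles the
 * 64-bit address from the two dwords.
 */
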
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

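/* Note: only the 3 PCP bits are rewritten above; a tag of (prio 5, VID 100)
 * on an adapter whose vlan_prio_bmap lacks bit 5 goes out with the
 * FW-recommended priority but still carries VID 100.
 */
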
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	u64 dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

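/* Note on the dma_err unwind above: the queue head is rewound to map_head
 * and each already-posted WRB is unmapped in order; map_single is cleared
 * after the first entry because only the skb head (if present) was mapped
 * with dma_map_single(), the page frags with skb_frag_dma_map().
 */
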
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer and SH-R ASICs have a bug wherein packets of 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
		       (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}

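/* Note: vids[] is rebuilt from the adapter->vlan_tag bitmap on every call,
 * so the add/remove paths below just flip one bitmap entry and re-invoke
 * be_vid_config() rather than issuing incremental FW commands.
 */
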
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		adapter->vlan_tag[vid] = 0;
	}
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	status = be_vid_config(adapter);
	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			vf_cfg->vlan_tag = vlan;
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		vf_cfg->vlan_tag = 0;
		vlan = vf_cfg->def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       vf_cfg->if_handle, 0);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);

		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

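/* Example: 150,000 pkts/s of combined rx+tx traffic gives
 * eqd = (150000 / 15000) << 2 = 40; after clamping to [min_eqd, max_eqd]
 * the FW is programmed with a delay multiplier of 40 * 65 / 100 = 26.
 */
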
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

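/* Example: a 9000-byte frame with the default rx_frag_size of 2048 arrives
 * as 5 fragments; only the Ethernet header is copied into the skb linear
 * area, the rest is attached as page frags, and frags that sit in the same
 * physical page share one frags[] slot.
 */
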
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

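/* Example: with 4K pages and the default rx_frag_size of 2048, each page
 * is split into two receive fragments; the second one sets last_page_user,
 * so get_rx_page_info() DMA-unmaps the page only when its last fragment is
 * consumed.
 */
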
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}
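/* Reap up to 'budget' RX completions from this RX object's CQ. Flush
 * completions (num_rcvd == 0), partial-DMA completions and wrong-port
 * packets are dropped; everything else goes up via GRO or the regular
 * receive path.
 */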
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
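/* Reap up to 'budget' TX completions for one TX object and free the
 * corresponding wrbs. Returns true when the CQ was fully drained within
 * the budget.
 */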
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
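/* NAPI poll handler: each EQ services its share of the TXQs, the RXQs
 * mapped to it and, for the MCC EQ, MCC completions. The EQ is re-armed
 * only when all the work fit within the budget.
 */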
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
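/* Low-latency (busy-poll) RX path: called from the socket layer with a
 * small fixed budget; it skips GRO and never re-arms the EQ.
 */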
#ifdef CONFIG_NET_RX_BUSY_POLL
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		/* Do not log error messages if it's a FW reset */
		if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
		    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
			dev_info(&adapter->pdev->dev,
				 "Firmware update in progress\n");
			return;
		} else {
			dev_err(&adapter->pdev->dev,
				"Error detected in the card\n");
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	for (i = 0; ue_lo; ue_lo >>= 1, i++) {
		if (ue_lo & 1)
			dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
	}

	for (i = 0; ue_hi; ue_hi >>= 1, i++) {
		if (ue_hi & 1)
			dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
	}
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCE. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (!status) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
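/* ndo_stop: disable NAPI and busy-poll, drain pending TX completions,
 * destroy the RX queues, delete the programmed uc-mac entries and
 * release the IRQs.
 */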
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		adapter->rss_flags = RSS_ENABLE_NONE;
	}

	rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
			       128);
	if (rc) {
		adapter->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
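/* ndo_open: create and post the RX queues, register IRQs, arm all CQs
 * and EQs and enable NAPI before starting the TX queues. On any failure
 * the teardown is delegated to be_close().
 */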
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf + 1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}
static void be_mac_clear(struct be_adapter *adapter)
{
	int i;

	if (adapter->pmac_id) {
		for (i = 0; i < (adapter->uc_macs + 1); i++)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		adapter->uc_macs = 0;

		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs;

	max_vfs = pci_sriov_get_totalvfs(pdev);

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else if (adapter->function_mode & UMC_ENABLED)
		res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}

	return;
}
#endif
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}
static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
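/* Write one firmware image to flash in 32KB chunks; the final chunk is
 * issued as a FLASH op (commit) while the earlier ones use SAVE.
 */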
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
					pflashcomp[i].offset, pflashcomp[i].size,
					filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
					img_offset, img_size,
					filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				fsec->fsec_entry[i].type);
			return status;
		}
	}
	return 0;
}
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}
static int be_ndo_bridge_setlink(struct net_device *dev,
				 struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev,
				 u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll
#endif
};
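/* Publish netdev features and ops. Checksum/TSO offloads and VLAN tag
 * insertion are always advertised; RX hashing is advertised only when
 * multiple RX queues are in use.
 */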
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	return 0;
}
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}
static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
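/* Legacy PM suspend: optionally arms wake-on-lan, masks interrupts,
 * stops the recovery worker and tears down queues before putting the
 * device into the requested low-power state.
 */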
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
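/* Legacy PM resume: re-enables the device, waits for firmware, redoes
 * the full be_setup() and restarts the recovery worker before
 * re-attaching the netdev.
 */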
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
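/* PCI-EEH callbacks. The PCI core invokes these when the platform
 * detects an error on the device's PCI channel; see
 * Documentation/PCI/pci-error-recovery.txt for the state machine.
 */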
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
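/* Slot reset has been performed; re-enable the device and wait for
 * firmware to become ready before declaring the slot recovered.
 */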
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
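/* Final EEH step: the device is functional again; reset the function,
 * re-init firmware, rebuild the queues and re-attach the netdev.
 */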
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
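/* Module entry point: validates the rx_frag_size parameter (falling back
 * to 2048 for unsupported values) and registers the PCI driver, e.g.:
 *   modprobe be2net rx_frag_size=4096
 * (assuming the module is built under its usual name, be2net)
 */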
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);