/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	/* ... */
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	/* ... */
};

#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)
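
/* Queue rings are backed by a single DMA-coherent buffer; the two helpers
 * below free and allocate that backing memory.
 */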
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
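
/* Toggle the host-interrupt enable bit directly in the PCI config-space
 * MEMBAR control register; used as a fallback when the INTR_SET FW cmd
 * fails (see be_intr_set() below).
 */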
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
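
/* The be_*_notify() helpers below ring the adapter's doorbell registers to
 * report entries posted to (or popped from) the RX, TX, event and completion
 * rings.
 */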
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
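
/* Program the given MAC as the interface's active (pmac_id[0]) address,
 * reusing an entry already programmed via the uc-list when possible.
 */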
static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}

static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}
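
/* ndo_set_mac_address handler */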
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
	 * address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Remember currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;

	WRITE_ONCE(*acc, newacc);
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static void be_get_stats64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) +
		       inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
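
/* A WRB (work request block) describes one TX buffer fragment; the 64-bit
 * bus address is split across the frag_pa_hi/frag_pa_lo words.
 */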
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
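
/* Populate the per-packet header WRB from the offload flags collected in
 * wrb_params by be_get_wrb_params_from_skb().
 */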
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	wrb->rsvd0 = 0;
	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	bool map_single = false;
	u32 head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
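
/* Insert the VLAN tag (and the QnQ outer tag, if configured) into the packet
 * itself, for the cases where HW VLAN tagging must be skipped.
 */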
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
		(is_multicast_ether_addr(eh->h_dest) && \
		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
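
/* ndo_start_xmit handler */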
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (test_bit(vid, adapter->vids))
		goto done;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (!test_bit(vid, adapter->vids))
		goto done;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
}

static void be_clear_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
}

/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
 * We use a single callback function for both sync and unsync. We really don't
 * add/remove addresses through this callback. But, we use it to detect changes
 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
 */
static int be_uc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_uc_list = true;
	return 0;
}

static int be_mc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_mc_list = true;
	return 0;
}
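
/* Program the current mc-list into the HW filter, falling back to mcast
 * promiscuous mode when the list exceeds what the interface supports.
 */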
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}

static void be_clear_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	__dev_mc_unsync(netdev, NULL);
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
	adapter->mc_count = 0;
}

static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
{
	if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
		adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
		return 0;
	}

	return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
			       adapter->if_handle,
			       &adapter->pmac_id[uc_idx + 1], 0);
}

static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
{
	if (pmac_id == adapter->pmac_id[0])
		return;

	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool uc_promisc = false;
	int curr_uc_macs = 0, i;

	netif_addr_lock_bh(netdev);
	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_uc_list = false;
	} else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
		uc_promisc = true;
		adapter->update_uc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
		/* Update uc-list unconditionally if the iface was previously
		 * in uc-promisc mode and now is out of that mode.
		 */
		adapter->update_uc_list = true;
	}

	if (adapter->update_uc_list) {
		/* cache the uc-list in adapter array */
		i = 0;
		netdev_for_each_uc_addr(ha, netdev) {
			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
			i++;
		}
		curr_uc_macs = netdev_uc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (uc_promisc) {
		be_set_uc_promisc(adapter);
	} else if (adapter->update_uc_list) {
		be_clear_uc_promisc(adapter);

		for (i = 0; i < adapter->uc_macs; i++)
			be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

		for (i = 0; i < curr_uc_macs; i++)
			be_uc_mac_add(adapter, i);
		adapter->uc_macs = curr_uc_macs;
		adapter->update_uc_list = false;
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	__dev_uc_unsync(netdev, NULL);
	for (i = 0; i < adapter->uc_macs; i++)
		be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

	adapter->uc_macs = 0;
}

static void __be_set_rx_mode(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	mutex_lock(&adapter->rx_filter_lock);

	if (netdev->flags & IFF_PROMISC) {
		if (!be_in_all_promisc(adapter))
			be_set_all_promisc(adapter);
	} else if (be_in_all_promisc(adapter)) {
		/* We need to re-program the vlan-list or clear
		 * vlan-promisc mode (if needed) when the interface
		 * comes out of promisc mode.
		 */
		be_vid_config(adapter);
	}

	be_set_uc_list(adapter);
	be_set_mc_list(adapter);

	mutex_unlock(&adapter->rx_filter_lock);
}

static void be_work_set_rx_mode(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);

	__be_set_rx_mode(cmd_work->adapter);
	kfree(cmd_work);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

	return 0;
}

static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			  __be16 vlan_proto)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}
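
/* Adaptive interrupt coalescing (AIC): the helpers below derive a new EQ
 * delay from the rx/tx pkts-per-second rate observed since the last sample.
 */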
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
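/* Worked example of the calculation above (illustrative numbers, not from
 * any datasheet): at a combined RX+TX rate of 600,000 pkts/s,
 * pps / 15000 = 40 and eqd = 40 << 2 = 160, which is then clamped to the
 * [aic->min_eqd, aic->max_eqd] window before being returned.
 */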
/* For Skyhawk-R only */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (!aic->enable)
		return 0;

	if (jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}
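/* The thresholds above bucket the computed eqd into one of four
 * rearm-to-interrupt delay encodings (eqd > 100 -> R2I_DLY_ENC_1,
 * > 60 -> R2I_DLY_ENC_2, > 20 -> R2I_DLY_ENC_3, else R2I_DLY_ENC_0);
 * the actual delay each encoding selects is a Skyhawk EQ_DB hardware
 * detail.
 */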
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
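/* Note on the conversion above: what is handed to FW is not eqd itself
 * but delay_multiplier = (eqd * 65) / 100, so e.g. eqd = 100 is
 * programmed as a multiplier of 65; the 65/100 scaling matches what
 * be_cmd_modify_eqd() sends in the MODIFY_EQ_DELAY command.
 */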
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
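/* The valid bit doubles as the producer/consumer handshake on the RX CQ
 * ring: HW sets it when it DMAs a completion and the driver clears it
 * after parsing (above), so a stale entry reads as empty the next time
 * the ring wraps around to it.
 */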
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
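/* Fragment math for the loop above, assuming the default rx_frag_size
 * of 2048 and 4K pages: big_page_size works out to 4096 (see the formula
 * in be_rx_cqs_create()), so each page is carved into 2 fragments; only
 * the fragment flagged last_frag records the page DMA address later used
 * by dma_unmap_page() in get_rx_page_info().
 */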
static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case BE_TX_COMP_HDR_PARSE_ERR:
		tx_stats(txo)->tx_hdr_parse_err++;
		break;
	case BE_TX_COMP_NDMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	case BE_TX_COMP_ACL_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	}
}

static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case LANCER_TX_COMP_LSO_ERR:
		tx_stats(txo)->tx_tso_err++;
		break;
	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	case LANCER_TX_COMP_QINQ_ERR:
		tx_stats(txo)->tx_qinq_err++;
		break;
	case LANCER_TX_COMP_PARITY_ERR:
		tx_stats(txo)->tx_internal_parity_err++;
		break;
	case LANCER_TX_COMP_DMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	case LANCER_TX_COMP_SGE_ERR:
		tx_stats(txo)->tx_sge_err++;
		break;
	}
}
static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
						struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	if (txcp->status) {
		if (lancer_chip(adapter)) {
			lancer_update_tx_err(txo, txcp->status);
			/* Reset the adapter in case of TSO,
			 * SGE or Parity error
			 */
			if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
			    txcp->status == LANCER_TX_COMP_PARITY_ERR ||
			    txcp->status == LANCER_TX_COMP_SGE_ERR)
				be_set_error(adapter, BE_ERROR_TX);
		} else {
			be_update_tx_err(txo, txcp->status);
		}
	}

	if (be_check_error(adapter, BE_ERROR_TX))
		return NULL;

	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	dev_consume_skb_any(skb);

	return num_wrbs;
}
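/* A TX request occupies one header wrb plus one wrb per fragment; the
 * loop above walks and unmaps all of them, and the returned num_wrbs
 * counts both kinds so callers can decrement txq->used by the exact
 * number of ring entries reclaimed.
 */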
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}

/* Free posted rx buffers that were not used */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;

	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(adapter, txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* need enough EQs to service both RX and TX queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    max(adapter->cfg_num_rx_irqs,
					adapter->cfg_num_tx_irqs));

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
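/* Example of the sizing above (illustrative numbers): with 8 IRQs
 * available, cfg_num_rx_irqs = 6 and cfg_num_tx_irqs = 4, num_evt_qs
 * becomes min(8, max(6, 4)) = 6. The TX and RX CQs created later are
 * spread over these EQs with an (i % num_evt_qs) mapping.
 */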
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    i);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(adapter, txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete_done(napi, max_work);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
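/* This follows the usual NAPI contract: if the budget was not exhausted,
 * napi_complete_done() is called and the EQ is re-armed so interrupts
 * resume; otherwise events are only counted and cleared, and polling
 * continues on the next softirq pass.
 */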
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	struct device *dev = &adapter->pdev->dev;
	u16 val;
	u32 i;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		if (ue_lo || ue_hi) {
			/* On certain platforms BE3 hardware can indicate
			 * spurious UEs. In case of a UE in the chip,
			 * the POST register correctly reports either a
			 * FAT_LOG_START state (FW is currently dumping
			 * FAT log data) or a ARMFW_UE state. Check for the
			 * above states to ascertain if the UE is valid or not.
			 */
			if (BE3_chip(adapter)) {
				val = be_POST_stage_get(adapter);
				if ((val & POST_STAGE_FAT_LOG_START)
				     != POST_STAGE_FAT_LOG_START &&
				    (val & POST_STAGE_ARMFW_UE)
				     != POST_STAGE_ARMFW_UE)
					return;
			}

			dev_err(dev, "Error detected in the adapter");
			be_set_error(adapter, BE_ERROR_UE);

			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

static int be_msix_enable(struct be_adapter *adapter)
{
	unsigned int i, max_roce_eqs;
	struct device *dev = &adapter->pdev->dev;
	int num_vec = 0;

	/* If RoCE is supported, program the max number of vectors that
	 * could be used for NIC and RoCE, else, just program the number
	 * we'll use initially.
	 */
	if (be_roce_supported(adapter)) {
		max_roce_eqs =
			be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
		max_roce_eqs = min(max_roce_eqs, num_online_cpus());
		num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
	} else {
		num_vec = max(adapter->cfg_num_rx_irqs,
			      adapter->cfg_num_tx_irqs);
	}

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
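/* Example of the vector split above, assuming RoCE is supported and
 * pci_enable_msix_range() grants 16 vectors: num_msix_roce_vec becomes
 * 16 / 2 = 8 and the NIC keeps the remaining 8 in num_msix_vec.
 */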
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	for (i--; i >= 0; i--) {
		eqo = &adapter->eq_obj[i];
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	}
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	if (rss->rss_flags) {
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}

static void be_disable_if_filters(struct be_adapter *adapter)
{
	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
		be_dev_mac_del(adapter, adapter->pmac_id[0]);
		eth_zero_addr(adapter->dev_mac);
	}

	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW. As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	/* Before attempting cleanup ensure all the pending cmds in the
	 * config_wq have finished execution
	 */
	flush_workqueue(be_wq);

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
					  RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
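/* The nested loops above stripe each ring's rss_id across the
 * RSS_INDIR_TABLE_LEN-entry indirection table round-robin; e.g. with
 * 4 RSS rings and a 128-entry table each ring appears in every 4th
 * slot, so hashed flows spread roughly evenly across rings.
 */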
static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
	if (status)
		return status;

	/* Normally this condition usually true as the ->dev_mac is zeroed.
	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
	 * subsequent be_dev_mac_add() can fail (after fresh boot)
	 */
	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
		int old_pmac_id = -1;

		/* Remember old programmed MAC if any - can happen on BE3 VF */
		if (!is_zero_ether_addr(adapter->dev_mac))
			old_pmac_id = adapter->pmac_id[0];

		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
		if (status)
			return status;

		/* Delete the old programmed MAC as we successfully programmed
		 * a new MAC
		 */
		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
			be_dev_mac_del(adapter, old_pmac_id);

		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	__be_set_rx_mode(adapter);

	return 0;
}
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	if (skyhawk_chip(adapter))
		udp_tunnel_get_rx_info(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static void be_cancel_err_detection(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
		cancel_delayed_work_sync(&err_rec->err_detection_work);
		adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
	}
}
static int be_enable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	struct be_vxlan_port *vxlan_port;
	__be16 port;
	int status;

	vxlan_port = list_first_entry(&adapter->vxlan_port_list,
				      struct be_vxlan_port, list);
	port = vxlan_port->port;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		return status;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		return status;
	}
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return 0;
}

static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and its VFs */
	if (num_vfs) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
		 * RSS Tables per port. Provide RSS on VFs, only if number of
		 * VFs requested is less than its PF Pool's RSS Tables limit.
		 */
		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	}

	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and its VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
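/* Illustrative split for the logic above, assuming a pool with
 * max_rss_qs = 32 and num_vfs = 7: each of the 8 functions (PF + 7 VFs)
 * may get min(SH_VF_MAX_NIC_EQS, 32 / 8) = up to 4 RX/RSS queues,
 * subject to the PF-pool RSS-tables limit checked earlier.
 */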
static void be_if_destroy(struct be_adapter *adapter)
{
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	kfree(adapter->mc_list);
	adapter->mc_list = NULL;

	kfree(adapter->uc_list);
	adapter->uc_list = NULL;
}

static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	flush_workqueue(be_wq);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);

	be_if_destroy(adapter);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
/* Converting function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}

/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					   BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->if_flags = 0;
	adapter->phy_state = BE_UNKNOWN_PHY_STATE;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * no: of VFs for which RSS can be enabled.
 */
static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}
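/* Worked example with hypothetical numbers, assuming MAX_PORT_RSS_TABLES is
 * 15: with 3 NIC PFs on the port, rss_tables_on_port = 15 - 3 = 12. A PF
 * allowed 32 of the port's 96 total VFs would then get 32 * 12 / 96 = 4 RSS
 * tables for its VF pool.
 */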
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits, i.e. PF-pool limits
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
				res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
			min_t(u16, netif_get_num_default_rss_queues(),
			      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

		/* Initial MAC for BE3 VFs is already programmed by PF */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			memcpy(adapter->dev_mac, mac, ETH_ALEN);
	}

	return 0;
}
static void be_schedule_worker(struct be_adapter *adapter)
{
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
static void be_destroy_err_recovery_workq(void)
{
	if (!be_err_recovery_workq)
		return;

	flush_workqueue(be_err_recovery_workq);
	destroy_workqueue(be_err_recovery_workq);
	be_err_recovery_workq = NULL;
}
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
			   msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
static int be_if_create(struct be_adapter *adapter)
{
	u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	u32 cap_flags = be_if_cap_flags(adapter);
	int status;

	/* alloc required memory for other filtering fields */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	adapter->mc_list = kcalloc(be_max_mc(adapter),
				   sizeof(*adapter->mc_list), GFP_KERNEL);
	if (!adapter->mc_list)
		return -ENOMEM;

	adapter->uc_list = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->uc_list), GFP_KERNEL);
	if (!adapter->uc_list)
		return -ENOMEM;

	if (adapter->cfg_num_rx_irqs == 1)
		cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);

	en_flags &= cap_flags;
	/* will enable all the needed filter flags in be_open() */
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);

	return status;
}
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* The IF was destroyed and re-created. We need to clear
	 * all promiscuous flags valid for the destroyed IF.
	 * Without this promisc mode is not restored during
	 * be_open() because the driver thinks that it is
	 * already enabled in HW.
	 */
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}
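/* For example, fw_major_num("4.6.62.0") parses the leading "4." and returns
 * 4; be_setup() uses this to warn about pre-4.0 firmware on BE2 cards.
 */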
/* If it is error recovery, FLR the PF
 * Else if any VFs are already enabled don't FLR the PF
 */
static bool be_reset_required(struct be_adapter *adapter)
{
	if (be_error_recovering(adapter))
		return true;
	else
		return pci_num_vf(adapter->pdev) == 0;
}
/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FW is now ready; clear errors to allow cmds/doorbell */
	be_clear_error(adapter, BE_CLEAR_ALL);

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport
	 * confusing a linux bridge or OVS that it might be connected to.
	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	if (be_physfn(adapter) && !lancer_chip(adapter))
		be_cmd_set_features(adapter);

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
					 void (*func)(struct work_struct *))
{
	struct be_cmd_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		dev_err(&adapter->pdev->dev,
			"be_work memory allocation failed\n");
		return NULL;
	}

	INIT_WORK(&work->work, func);
	work->adapter = adapter;
	return work;
}
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and re-enable them again when
 * there's only one port left. We maintain a list of ports for this purpose.
 */
static void be_work_add_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	struct device *dev = &adapter->pdev->dev;
	__be16 port = cmd_work->info.vxlan_port;
	struct be_vxlan_port *vxlan_port;
	int status;

	/* Bump up the alias count if it is an existing port */
	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
		if (vxlan_port->port == port) {
			vxlan_port->port_aliases++;
			goto done;
		}
	}

	/* Add a new port to our list. We don't need a lock here since port
	 * add/delete are done only in the context of a single-threaded work
	 * queue (be_wq).
	 */
	vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL);
	if (!vxlan_port)
		goto done;

	vxlan_port->port = port;
	INIT_LIST_HEAD(&vxlan_port->list);
	list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list);
	adapter->vxlan_port_count++;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		goto err;
	}

	if (adapter->vxlan_port_count > 1)
		goto done;

	status = be_enable_vxlan_offloads(adapter);
	if (!status)
		goto done;

err:
	be_disable_vxlan_offloads(adapter);
done:
	kfree(cmd_work);
}
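/* Example of the alias counting above: if two VxLAN netdevs are created on
 * the same UDP port, the second add only bumps port_aliases -- offloads stay
 * enabled and no new list entry is made. Only a genuinely new second port
 * forces offloads off until the port count drops back to one.
 */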
static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;
	struct be_vxlan_port *vxlan_port;

	/* Nothing to be done if a port alias is being deleted */
	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
		if (vxlan_port->port == port) {
			if (vxlan_port->port_aliases) {
				vxlan_port->port_aliases--;
				goto done;
			}
			break;
		}
	}

	/* No port aliases left; delete the port from the list */
	list_del(&vxlan_port->list);
	adapter->vxlan_port_count--;

	/* Disable VxLAN offload if this is the offloaded port */
	if (adapter->vxlan_port == vxlan_port->port) {
		WARN_ON(adapter->vxlan_port_count);
		be_disable_vxlan_offloads(adapter);
		dev_info(&adapter->pdev->dev,
			 "Disabled VxLAN offloads for UDP port %d\n",
			 be16_to_cpu(port));
	}

	/* If only 1 port is left, re-enable VxLAN offload */
	if (adapter->vxlan_port_count == 1)
		be_enable_vxlan_offloads(adapter);

	kfree(vxlan_port);
done:
	kfree(cmd_work);
}
static void be_cfg_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti,
			      void (*func)(struct work_struct *))
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_cmd_work *cmd_work;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	cmd_work = be_alloc_work(adapter, func);
	if (cmd_work) {
		cmd_work->info.vxlan_port = ti->port;
		queue_work(be_wq, &cmd_work->work);
	}
}

static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}

static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	if (skb_is_gso(skb)) {
		/* IPv6 TSO requests with extension hdrs are a problem
		 * to Lancer and BE3 HW. Disable TSO6 feature.
		 */
		if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
			features &= ~NETIF_F_TSO6;

		/* Lancer cannot handle the packet with MSS less than 256.
		 * Also it can't handle a TSO packet with a single segment
		 * Disable the GSO support in such cases
		 */
		if (lancer_chip(adapter) &&
		    (skb_shinfo(skb)->gso_size < 256 ||
		     skb_shinfo(skb)->gso_segs == 1))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* The code below restricts offload features for some tunneled and
	 * Q-in-Q packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	features = vlan_features_check(skb, features);
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
		sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}
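/* The resulting id is one byte of (hba_port_num + 1) followed by the
 * controller serial-number words copied in reverse order, giving each port
 * a stable physical-port identifier the stack can expose.
 */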
static void be_set_rx_mode(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_cmd_work *work;

	work = be_alloc_work(adapter, be_work_set_rx_mode);
	if (work)
		queue_work(be_wq, &work->work);
}
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
	.ndo_udp_tunnel_add	= be_add_vxlan_port,
	.ndo_udp_tunnel_del	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;

	/* MTU range: 256 - 9000 */
	netdev->min_mtu = BE_MIN_MTU;
	netdev->max_mtu = BE_MAX_MTU;
}
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}
static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time && time_before_eq(
		jiffies - err_rec->last_recovery_time, recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;	/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}
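/* State walk for the TPE recovery machine above: NONE -> DETECT on the first
 * pass; from DETECT, PF0 moves to RESET (and later performs the soft reset)
 * while all other PFs go straight to PRE_POLL; PRE_POLL -> REINIT returns 0
 * so be_err_recover() can re-initialize the function. A failed criteria
 * check or unexpected state aborts with a negative status and no reschedule.
 */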
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}
static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check if the PF has allocated resources
		 * every second.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}
static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
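/* be_worker() re-arms itself every second; with be_get_temp_freq set to 64
 * in be_drv_init(), the die-temperature query above therefore runs roughly
 * once a minute on the PF.
 */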
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}
/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	INIT_LIST_HEAD(&adapter->vxlan_port_list);
	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
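/* The mailbox buffer is allocated 16 bytes larger than needed so that the
 * va/dma pair actually handed to hardware (mbox_mem) can be rounded up with
 * PTR_ALIGN() to the 16-byte boundary the MCC mailbox requires, regardless
 * of what alignment the DMA allocator returned.
 */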
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}
	return str;
}
static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
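/* .sriov_configure is invoked by the PCI core when the administrator writes
 * a VF count to the device's sriov_numvfs attribute in sysfs; returning the
 * number of VFs actually enabled (rather than 0) reports success back to
 * the core.
 */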
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
{
	int status;

	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	be_wq = create_singlethread_workqueue("be_wq");
	if (!be_wq) {
		pr_warn(DRV_NAME "workqueue creation failed\n");
		return -1;
	}

	be_err_recovery_workq =
		create_singlethread_workqueue("be_err_recover");
	if (!be_err_recovery_workq)
		pr_warn(DRV_NAME "Could not create error recovery workqueue\n");

	status = pci_register_driver(&be_driver);
	if (status) {
		destroy_workqueue(be_wq);
		be_destroy_err_recovery_workq();
	}
	return status;
}
module_init(be_init_module);
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);