/* drivers/net/ethernet/emulex/benet/be_main.c */

/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                          u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

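/* Toggle the HOSTINTR bit in the membar control register (PCI config
 * space) to enable or disable interrupt reporting to the host.
 */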
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

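/* Prefer the FW command for toggling interrupts; fall back to the
 * config-space register if the command fails.
 */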
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On lancer interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

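/* Doorbell helpers: each ring has a doorbell register in the adapter's
 * doorbell BAR. The wmb() ensures queue-entry writes are visible to the
 * device before the doorbell write that announces them.
 */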
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                         bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

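/* ndo_set_mac_address handler: program the new MAC via PMAC_ADD, delete
 * the old entry, then verify with the FW which MAC is actually active.
 */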
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed further only if the user-provided MAC is different
         * from the active MAC
         */
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (!ether_addr_equal(addr->sa_data, mac)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

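/* The populate_*() helpers copy the versioned FW stats layout into the
 * chip-agnostic adapter->drv_stats, so the rest of the driver need not
 * care which GET_STATS cmd version the chip speaks.
 */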
static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                port_stats->rx_address_filtered +
                port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                pport_stats->rx_address_filtered +
                pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                pport_stats->rx_drops_too_many_frags_lo;
}

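/* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit
 * software accumulator, detecting at most one wrap per call.
 */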
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                               struct be_rx_obj *rxo, u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* On BEx chips this erx HW counter wraps around after
                 * 65535. The driver accumulates it into a 32-bit value.
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        /* for BE3 */
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                                struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

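/* Reflect an async link-status event in the netif carrier state; the
 * first event seen also initializes the carrier state.
 */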
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                               u32 wrb_cnt, u32 copied, u32 gso_segs,
                               bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is NOT in the available
         * bmap, use the recommended priority instead.
         */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                         struct sk_buff *skb, u32 wrb_cnt, u32 len,
                         bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                              hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                          bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

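/* DMA-map the skb head and each frag, filling one WRB per mapping after
 * the header WRB. Returns the number of bytes mapped, or 0 on a mapping
 * error, in which case all WRBs filled so far are unwound.
 */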
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                        struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                        bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* As a f/w workaround, set skip_hw_vlan = 1, which informs
                 * the f/w to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* Lancer and SH-R ASICs have a bug wherein packets that are 32 bytes
         * or less may cause a transmit stall on that port. The work-around is
         * to pad such packets (<= 32 bytes) to a 36-byte length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

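/* ndo_start_xmit handler: apply the HW workarounds, build the WRBs, stop
 * the subqueue if it cannot hold another max-fragmented skb, then ring
 * the TX doorbell.
 */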
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
            new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                         "MTU must be between %d and %d bytes\n",
                         BE_MIN_MTU,
                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);

        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                        }
                }
        }

        return status;

set_vlan_promisc:
        if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
                return 0;

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        adapter->vlans_added++;

        status = be_vid_config(adapter);
        if (status) {
                adapter->vlans_added--;
                adapter->vlan_tag[vid] = 0;
        }
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        status = be_vid_config(adapter);
        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

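/* ndo_set_rx_mode handler: program promiscuous/multicast/unicast filters
 * to match the netdev state, falling back to promiscuous modes when the
 * HW filter tables are exhausted.
 */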
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev,
                         "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev,
                         "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                        mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                            struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        vf_cfg->vlan_tag = vlan;
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                vf_cfg->vlan_tag = 0;
                vlan = vf_cfg->def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                               vf_cfg->if_handle, 0);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                         "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                        "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
{
        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_pkts;
        aic->jiffies = now;
}

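/* Adaptive interrupt coalescing: sample rx/tx packet counts, compute
 * pkts/sec since the last sample and derive a new EQ delay clamped to
 * [min_eqd, max_eqd]; only EQs whose delay changed are reprogrammed.
 */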
static void be_eqd_update(struct be_adapter *adapter)
{
        struct be_set_eqd set_eqd[MAX_EVT_QS];
        int eqd, i, num = 0, start;
        struct be_aic_obj *aic;
        struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 rx_pkts, tx_pkts;
        ulong now;
        u32 pps, delta;

        for_all_evt_queues(adapter, eqo, i) {
                aic = &adapter->aic_obj[eqo->idx];
                if (!aic->enable) {
                        if (aic->jiffies)
                                aic->jiffies = 0;
                        eqd = aic->et_eqd;
                        goto modify_eqd;
                }

                rxo = &adapter->rx_obj[eqo->idx];
                do {
                        start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
                        rx_pkts = rxo->stats.rx_pkts;
                } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

                txo = &adapter->tx_obj[eqo->idx];
                do {
                        start = u64_stats_fetch_begin_bh(&txo->stats.sync);
                        tx_pkts = txo->stats.tx_reqs;
                } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));

                /* Skip, if wrapped around or first calculation */
                now = jiffies;
                if (!aic->jiffies || time_before(now, aic->jiffies) ||
                    rx_pkts < aic->rx_pkts_prev ||
                    tx_pkts < aic->tx_reqs_prev) {
                        be_aic_update(aic, rx_pkts, tx_pkts, now);
                        continue;
                }

                delta = jiffies_to_msecs(now - aic->jiffies);
                pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
                        (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
                eqd = (pps / 15000) << 2;

                if (eqd < 8)
                        eqd = 0;
                eqd = min_t(u32, eqd, aic->max_eqd);
                eqd = max_t(u32, eqd, aic->min_eqd);

                be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
                if (eqd != aic->prev_eqd) {
                        set_eqd[num].delay_multiplier = (eqd * 65) / 100;
                        set_eqd[num].eq_id = eqo->q.id;
                        aic->prev_eqd = eqd;
                        num++;
                }
        }

        if (num)
                be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                               struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;
        u16 frag_idx = rxq->tail;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        queue_tail_inc(rxq);
        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0],
                                  curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
        skb_mark_napi_id(skb, napi);

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
                                    struct napi_struct *napi,
                                    struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
        skb_mark_napi_id(skb, napi);

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
                                               vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
                                               vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
        rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
                                      ip_frag, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl
         */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(compl, rxcp);
        else
                be_parse_rx_compl_v0(compl, rxcp);

        if (rxcp->ip_frag)
                rxcp->l4_csum = 0;

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * Ignore it if vtm is not set.
                 */
                if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct device *dev = &adapter->pdev->dev;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(dev, pagep, 0,
                                                    adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        if (dma_mapping_error(dev, page_dmaaddr)) {
                                put_page(pagep);
                                pagep = NULL;
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &rxo->page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                if (rxo->rx_post_starved)
                        rxo->rx_post_starved = false;
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

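/* Fetch the next valid TX completion from the CQ, consume it (clear the
 * valid bit) and advance the tail; returns NULL when none is pending.
 */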
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

1842 static u16 be_tx_compl_process(struct be_adapter *adapter,
1843 struct be_tx_obj *txo, u16 last_index)
1845 struct be_queue_info *txq = &txo->q;
1846 struct be_eth_wrb *wrb;
1847 struct sk_buff **sent_skbs = txo->sent_skb_list;
1848 struct sk_buff *sent_skb;
1849 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1850 bool unmap_skb_hdr = true;
1852 sent_skb = sent_skbs[txq->tail];
1853 BUG_ON(!sent_skb);
1854 sent_skbs[txq->tail] = NULL;
1856 /* skip header wrb */
1857 queue_tail_inc(txq);
1859 do {
1860 cur_index = txq->tail;
1861 wrb = queue_tail_node(txq);
1862 unmap_tx_frag(&adapter->pdev->dev, wrb,
1863 (unmap_skb_hdr && skb_headlen(sent_skb)));
1864 unmap_skb_hdr = false;
1866 num_wrbs++;
1867 queue_tail_inc(txq);
1868 } while (cur_index != last_index);
1870 kfree_skb(sent_skb);
1871 return num_wrbs;
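/* Layout example: an skb with a linear area and two page frags posts
 * four WRBs -- [hdr][linear][frag0][frag1]. The completion's wrb_index
 * points at frag1, so the walk above unmaps three data WRBs and
 * returns num_wrbs == 4 (the header WRB is pre-counted).
 */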
1874 /* Return the number of events in the event queue */
1875 static inline int events_get(struct be_eq_obj *eqo)
1877 struct be_eq_entry *eqe;
1878 int num = 0;
1880 do {
1881 eqe = queue_tail_node(&eqo->q);
1882 if (eqe->evt == 0)
1883 break;
1885 rmb();
1886 eqe->evt = 0;
1887 num++;
1888 queue_tail_inc(&eqo->q);
1889 } while (true);
1891 return num;
1894 /* Leaves the EQ in disarmed state */
1895 static void be_eq_clean(struct be_eq_obj *eqo)
1897 int num = events_get(eqo);
1899 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1902 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1904 struct be_rx_page_info *page_info;
1905 struct be_queue_info *rxq = &rxo->q;
1906 struct be_queue_info *rx_cq = &rxo->cq;
1907 struct be_rx_compl_info *rxcp;
1908 struct be_adapter *adapter = rxo->adapter;
1909 int flush_wait = 0;
1911 /* Consume pending rx completions.
1912 * Wait for the flush completion (identified by zero num_rcvd)
1913 * to arrive. Notify the CQ even when there are no more CQ entries,
1914 * so that HW can flush partially coalesced CQ entries.
1915 * In Lancer, there is no need to wait for flush compl.
1917 for (;;) {
1918 rxcp = be_rx_compl_get(rxo);
1919 if (rxcp == NULL) {
1920 if (lancer_chip(adapter))
1921 break;
1923 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1924 dev_warn(&adapter->pdev->dev,
1925 "did not receive flush compl\n");
1926 break;
1928 be_cq_notify(adapter, rx_cq->id, true, 0);
1929 mdelay(1);
1930 } else {
1931 be_rx_compl_discard(rxo, rxcp);
1932 be_cq_notify(adapter, rx_cq->id, false, 1);
1933 if (rxcp->num_rcvd == 0)
1934 break;
1938 /* After cleanup, leave the CQ in unarmed state */
1939 be_cq_notify(adapter, rx_cq->id, false, 0);
1941 /* Then free posted rx buffers that were not used */
1942 while (atomic_read(&rxq->used) > 0) {
1943 page_info = get_rx_page_info(rxo);
1944 put_page(page_info->page);
1945 memset(page_info, 0, sizeof(*page_info));
1947 BUG_ON(atomic_read(&rxq->used));
1948 rxq->tail = rxq->head = 0;
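/* Flush handshake (non-Lancer): after the RXQ is destroyed, HW posts
 * one final completion with num_rcvd == 0. While waiting for it, the
 * CQ is re-armed with zero popped entries up to 10 times at 1ms
 * intervals so HW can push out partially coalesced entries before we
 * give up.
 */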
1951 static void be_tx_compl_clean(struct be_adapter *adapter)
1953 struct be_tx_obj *txo;
1954 struct be_queue_info *txq;
1955 struct be_eth_tx_compl *txcp;
1956 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1957 struct sk_buff *sent_skb;
1958 bool dummy_wrb;
1959 int i, pending_txqs;
1961 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1962 do {
1963 pending_txqs = adapter->num_tx_qs;
1965 for_all_tx_queues(adapter, txo, i) {
1966 txq = &txo->q;
1967 while ((txcp = be_tx_compl_get(&txo->cq))) {
1968 end_idx =
1969 AMAP_GET_BITS(struct amap_eth_tx_compl,
1970 wrb_index, txcp);
1971 num_wrbs += be_tx_compl_process(adapter, txo,
1972 end_idx);
1973 cmpl++;
1975 if (cmpl) {
1976 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1977 atomic_sub(num_wrbs, &txq->used);
1978 cmpl = 0;
1979 num_wrbs = 0;
1981 if (atomic_read(&txq->used) == 0)
1982 pending_txqs--;
1985 if (pending_txqs == 0 || ++timeo > 200)
1986 break;
1988 mdelay(1);
1989 } while (true);
1991 for_all_tx_queues(adapter, txo, i) {
1992 txq = &txo->q;
1993 if (atomic_read(&txq->used))
1994 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1995 atomic_read(&txq->used));
1997 /* free posted tx for which compls will never arrive */
1998 while (atomic_read(&txq->used)) {
1999 sent_skb = txo->sent_skb_list[txq->tail];
2000 end_idx = txq->tail;
2001 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2002 &dummy_wrb);
2003 index_adv(&end_idx, num_wrbs - 1, txq->len);
2004 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2005 atomic_sub(num_wrbs, &txq->used);
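/* Drain strategy: every TX CQ is polled once per millisecond for up
 * to 200ms; a queue counts as drained once txq->used reaches zero.
 * WRBs still posted after the timeout will never complete (e.g. after
 * an HW error), so the loop above walks the ring from txq->tail and
 * reclaims their skbs by hand via be_tx_compl_process().
 */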
2010 static void be_evt_queues_destroy(struct be_adapter *adapter)
2012 struct be_eq_obj *eqo;
2013 int i;
2015 for_all_evt_queues(adapter, eqo, i) {
2016 if (eqo->q.created) {
2017 be_eq_clean(eqo);
2018 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2019 napi_hash_del(&eqo->napi);
2020 netif_napi_del(&eqo->napi);
2022 be_queue_free(adapter, &eqo->q);
2026 static int be_evt_queues_create(struct be_adapter *adapter)
2028 struct be_queue_info *eq;
2029 struct be_eq_obj *eqo;
2030 struct be_aic_obj *aic;
2031 int i, rc;
2033 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2034 adapter->cfg_num_qs);
2036 for_all_evt_queues(adapter, eqo, i) {
2037 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2038 BE_NAPI_WEIGHT);
2039 napi_hash_add(&eqo->napi);
2040 aic = &adapter->aic_obj[i];
2041 eqo->adapter = adapter;
2042 eqo->tx_budget = BE_TX_BUDGET;
2043 eqo->idx = i;
2044 aic->max_eqd = BE_MAX_EQD;
2045 aic->enable = true;
2047 eq = &eqo->q;
2048 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2049 sizeof(struct be_eq_entry));
2050 if (rc)
2051 return rc;
2053 rc = be_cmd_eq_create(adapter, eqo);
2054 if (rc)
2055 return rc;
2057 return 0;
2060 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2062 struct be_queue_info *q;
2064 q = &adapter->mcc_obj.q;
2065 if (q->created)
2066 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2067 be_queue_free(adapter, q);
2069 q = &adapter->mcc_obj.cq;
2070 if (q->created)
2071 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2072 be_queue_free(adapter, q);
2075 /* Must be called only after TX qs are created as MCC shares TX EQ */
2076 static int be_mcc_queues_create(struct be_adapter *adapter)
2078 struct be_queue_info *q, *cq;
2080 cq = &adapter->mcc_obj.cq;
2081 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2082 sizeof(struct be_mcc_compl)))
2083 goto err;
2085 /* Use the default EQ for MCC completions */
2086 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2087 goto mcc_cq_free;
2089 q = &adapter->mcc_obj.q;
2090 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2091 goto mcc_cq_destroy;
2093 if (be_cmd_mccq_create(adapter, q, cq))
2094 goto mcc_q_free;
2096 return 0;
2098 mcc_q_free:
2099 be_queue_free(adapter, q);
2100 mcc_cq_destroy:
2101 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2102 mcc_cq_free:
2103 be_queue_free(adapter, cq);
2104 err:
2105 return -1;
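/* The error labels above unwind in strict reverse order of creation:
 * a failure in be_cmd_mccq_create() frees the MCC queue memory,
 * destroys the CQ in FW, then frees the CQ memory -- the standard
 * goto-unwind idiom, releasing each resource exactly once.
 */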
2108 static void be_tx_queues_destroy(struct be_adapter *adapter)
2110 struct be_queue_info *q;
2111 struct be_tx_obj *txo;
2112 u8 i;
2114 for_all_tx_queues(adapter, txo, i) {
2115 q = &txo->q;
2116 if (q->created)
2117 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2118 be_queue_free(adapter, q);
2120 q = &txo->cq;
2121 if (q->created)
2122 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2123 be_queue_free(adapter, q);
2127 static int be_tx_qs_create(struct be_adapter *adapter)
2129 struct be_queue_info *cq, *eq;
2130 struct be_tx_obj *txo;
2131 int status, i;
2133 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2135 for_all_tx_queues(adapter, txo, i) {
2136 cq = &txo->cq;
2137 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2138 sizeof(struct be_eth_tx_compl));
2139 if (status)
2140 return status;
2142 u64_stats_init(&txo->stats.sync);
2143 u64_stats_init(&txo->stats.sync_compl);
2145 /* If num_evt_qs is less than num_tx_qs, then more than
2146 * one txq share an eq
2148 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2149 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2150 if (status)
2151 return status;
2153 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2154 sizeof(struct be_eth_wrb));
2155 if (status)
2156 return status;
2158 status = be_cmd_txq_create(adapter, txo);
2159 if (status)
2160 return status;
2163 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2164 adapter->num_tx_qs);
2165 return 0;
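/* TXQ-to-EQ mapping is round-robin: txq i posts completions to
 * eq_obj[i % num_evt_qs]. For example, with 8 TXQs and 4 EQs, TXQs 0
 * and 4 share EQ0, TXQs 1 and 5 share EQ1, and so on; be_poll() walks
 * the same stride when reaping TX completions.
 */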
2168 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2170 struct be_queue_info *q;
2171 struct be_rx_obj *rxo;
2172 int i;
2174 for_all_rx_queues(adapter, rxo, i) {
2175 q = &rxo->cq;
2176 if (q->created)
2177 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2178 be_queue_free(adapter, q);
2182 static int be_rx_cqs_create(struct be_adapter *adapter)
2184 struct be_queue_info *eq, *cq;
2185 struct be_rx_obj *rxo;
2186 int rc, i;
2188 /* We can create as many RSS rings as there are EQs. */
2189 adapter->num_rx_qs = adapter->num_evt_qs;
2191 /* We'll use RSS only if at least 2 RSS rings are supported.
2192 * When RSS is used, we'll need a default RXQ for non-IP traffic.
2194 if (adapter->num_rx_qs > 1)
2195 adapter->num_rx_qs++;
2197 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2198 for_all_rx_queues(adapter, rxo, i) {
2199 rxo->adapter = adapter;
2200 cq = &rxo->cq;
2201 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2202 sizeof(struct be_eth_rx_compl));
2203 if (rc)
2204 return rc;
2206 u64_stats_init(&rxo->stats.sync);
2207 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2208 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2209 if (rc)
2210 return rc;
2213 dev_info(&adapter->pdev->dev,
2214 "created %d RSS queue(s) and 1 default RX queue\n",
2215 adapter->num_rx_qs - 1);
2216 return 0;
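/* Example: with 4 EQs, num_rx_qs becomes 5 -- four RSS rings plus the
 * default RXQ that catches non-IP (non-RSS-able) traffic; each CQ is
 * bound to an EQ with the same i % num_evt_qs stride as the TX side.
 */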
2219 static irqreturn_t be_intx(int irq, void *dev)
2221 struct be_eq_obj *eqo = dev;
2222 struct be_adapter *adapter = eqo->adapter;
2223 int num_evts = 0;
2225 /* IRQ is not expected when NAPI is scheduled as the EQ
2226 * will not be armed.
2227 * But, this can happen on Lancer INTx where it takes
2228 * a while to de-assert INTx or in BE2 where occasionally
2229 * an interrupt may be raised even when EQ is unarmed.
2230 * If NAPI is already scheduled, then counting & notifying
2231 * events will orphan them.
2233 if (napi_schedule_prep(&eqo->napi)) {
2234 num_evts = events_get(eqo);
2235 __napi_schedule(&eqo->napi);
2236 if (num_evts)
2237 eqo->spurious_intr = 0;
2239 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2241 /* Return IRQ_HANDLED only for the first spurious intr
2242 * after a valid intr to stop the kernel from branding
2243 * this irq as a bad one!
2245 if (num_evts || eqo->spurious_intr++ == 0)
2246 return IRQ_HANDLED;
2247 else
2248 return IRQ_NONE;
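/* Spurious-interrupt accounting: the first stray interrupt after a
 * valid one returns IRQ_HANDLED (spurious_intr goes 0 -> 1); a second
 * consecutive stray returns IRQ_NONE so the kernel's spurious-IRQ
 * detection can disable a genuinely bad line. Any valid interrupt
 * resets the counter to 0.
 */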
2251 static irqreturn_t be_msix(int irq, void *dev)
2253 struct be_eq_obj *eqo = dev;
2255 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2256 napi_schedule(&eqo->napi);
2257 return IRQ_HANDLED;
2260 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2262 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2265 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2266 int budget, int polling)
2268 struct be_adapter *adapter = rxo->adapter;
2269 struct be_queue_info *rx_cq = &rxo->cq;
2270 struct be_rx_compl_info *rxcp;
2271 u32 work_done;
2273 for (work_done = 0; work_done < budget; work_done++) {
2274 rxcp = be_rx_compl_get(rxo);
2275 if (!rxcp)
2276 break;
2278 /* Is it a flush compl that has no data? */
2279 if (unlikely(rxcp->num_rcvd == 0))
2280 goto loop_continue;
2282 /* Discard compl with partial DMA (Lancer B0) */
2283 if (unlikely(!rxcp->pkt_size)) {
2284 be_rx_compl_discard(rxo, rxcp);
2285 goto loop_continue;
2288 /* On BE drop pkts that arrive due to imperfect filtering in
2289 * promiscuous mode on some SKUs
2291 if (unlikely(rxcp->port != adapter->port_num &&
2292 !lancer_chip(adapter))) {
2293 be_rx_compl_discard(rxo, rxcp);
2294 goto loop_continue;
2297 /* Don't do gro when we're busy_polling */
2298 if (do_gro(rxcp) && polling != BUSY_POLLING)
2299 be_rx_compl_process_gro(rxo, napi, rxcp);
2300 else
2301 be_rx_compl_process(rxo, napi, rxcp);
2303 loop_continue:
2304 be_rx_stats_update(rxo, rxcp);
2307 if (work_done) {
2308 be_cq_notify(adapter, rx_cq->id, true, work_done);
2310 /* When an rx-obj gets into post_starved state, just
2311 * let be_worker do the posting.
2313 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2314 !rxo->rx_post_starved)
2315 be_post_rx_frags(rxo, GFP_ATOMIC);
2318 return work_done;
2321 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2322 int budget, int idx)
2324 struct be_eth_tx_compl *txcp;
2325 int num_wrbs = 0, work_done;
2327 for (work_done = 0; work_done < budget; work_done++) {
2328 txcp = be_tx_compl_get(&txo->cq);
2329 if (!txcp)
2330 break;
2331 num_wrbs += be_tx_compl_process(adapter, txo,
2332 AMAP_GET_BITS(struct amap_eth_tx_compl,
2333 wrb_index, txcp));
2336 if (work_done) {
2337 be_cq_notify(adapter, txo->cq.id, true, work_done);
2338 atomic_sub(num_wrbs, &txo->q.used);
2340 /* As Tx wrbs have been freed up, wake up netdev queue
2341 * if it was stopped due to lack of tx wrbs. */
2342 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2343 atomic_read(&txo->q.used) < txo->q.len / 2) {
2344 netif_wake_subqueue(adapter->netdev, idx);
2347 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2348 tx_stats(txo)->tx_compl += work_done;
2349 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2351 return (work_done < budget); /* Done */
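/* The subqueue is restarted only once at least half the WRB ring is
 * free (used < len / 2); this hysteresis keeps the queue from
 * flapping between stopped and started on every completion.
 */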
2354 int be_poll(struct napi_struct *napi, int budget)
2356 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2357 struct be_adapter *adapter = eqo->adapter;
2358 int max_work = 0, work, i, num_evts;
2359 struct be_rx_obj *rxo;
2360 bool tx_done;
2362 num_evts = events_get(eqo);
2364 /* Process all TXQs serviced by this EQ */
2365 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2366 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2367 eqo->tx_budget, i);
2368 if (!tx_done)
2369 max_work = budget;
2372 if (be_lock_napi(eqo)) {
2373 /* This loop will iterate twice for EQ0 in which
2374 * completions of the last RXQ (default one) are also processed.
2375 * For other EQs the loop iterates only once.
2377 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2378 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2379 max_work = max(work, max_work);
2381 be_unlock_napi(eqo);
2382 } else {
2383 max_work = budget;
2386 if (is_mcc_eqo(eqo))
2387 be_process_mcc(adapter);
2389 if (max_work < budget) {
2390 napi_complete(napi);
2391 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2392 } else {
2393 /* As we'll continue in polling mode, count and clear events */
2394 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2396 return max_work;
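/* NAPI contract: returning less than the budget declares the EQ idle,
 * so it is re-armed and interrupts resume; returning the full budget
 * keeps the softirq in polling mode, and events are only counted and
 * cleared without re-arming, so no interrupt fires meanwhile.
 */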
2399 #ifdef CONFIG_NET_RX_BUSY_POLL
2400 static int be_busy_poll(struct napi_struct *napi)
2402 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2403 struct be_adapter *adapter = eqo->adapter;
2404 struct be_rx_obj *rxo;
2405 int i, work = 0;
2407 if (!be_lock_busy_poll(eqo))
2408 return LL_FLUSH_BUSY;
2410 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2411 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2412 if (work)
2413 break;
2416 be_unlock_busy_poll(eqo);
2417 return work;
2419 #endif
2421 void be_detect_error(struct be_adapter *adapter)
2423 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2424 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2425 u32 i;
2427 if (be_hw_error(adapter))
2428 return;
2430 if (lancer_chip(adapter)) {
2431 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2432 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2433 sliport_err1 = ioread32(adapter->db +
2434 SLIPORT_ERROR1_OFFSET);
2435 sliport_err2 = ioread32(adapter->db +
2436 SLIPORT_ERROR2_OFFSET);
2438 } else {
2439 pci_read_config_dword(adapter->pdev,
2440 PCICFG_UE_STATUS_LOW, &ue_lo);
2441 pci_read_config_dword(adapter->pdev,
2442 PCICFG_UE_STATUS_HIGH, &ue_hi);
2443 pci_read_config_dword(adapter->pdev,
2444 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2445 pci_read_config_dword(adapter->pdev,
2446 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2448 ue_lo = (ue_lo & ~ue_lo_mask);
2449 ue_hi = (ue_hi & ~ue_hi_mask);
2452 /* On certain platforms BE hardware can indicate spurious UEs.
2453 * A real UE will make the hardware stop working on its own, so
2454 * hw_error is deliberately not set here for UE detection.
2456 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2457 adapter->hw_error = true;
2458 /* Do not log error messages if it's a FW reset */
2459 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2460 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2461 dev_info(&adapter->pdev->dev,
2462 "Firmware update in progress\n");
2463 return;
2464 } else {
2465 dev_err(&adapter->pdev->dev,
2466 "Error detected in the card\n");
2470 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2471 dev_err(&adapter->pdev->dev,
2472 "ERR: sliport status 0x%x\n", sliport_status);
2473 dev_err(&adapter->pdev->dev,
2474 "ERR: sliport error1 0x%x\n", sliport_err1);
2475 dev_err(&adapter->pdev->dev,
2476 "ERR: sliport error2 0x%x\n", sliport_err2);
2479 if (ue_lo) {
2480 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2481 if (ue_lo & 1)
2482 dev_err(&adapter->pdev->dev,
2483 "UE: %s bit set\n", ue_status_low_desc[i]);
2487 if (ue_hi) {
2488 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2489 if (ue_hi & 1)
2490 dev_err(&adapter->pdev->dev,
2491 "UE: %s bit set\n", ue_status_hi_desc[i]);
2497 static void be_msix_disable(struct be_adapter *adapter)
2499 if (msix_enabled(adapter)) {
2500 pci_disable_msix(adapter->pdev);
2501 adapter->num_msix_vec = 0;
2502 adapter->num_msix_roce_vec = 0;
2506 static int be_msix_enable(struct be_adapter *adapter)
2508 int i, status, num_vec;
2509 struct device *dev = &adapter->pdev->dev;
2511 /* If RoCE is supported, program the max number of NIC vectors that
2512 * may be configured via set-channels, along with vectors needed for
2513 * RoCE. Else, just program the number we'll use initially.
2515 if (be_roce_supported(adapter))
2516 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2517 2 * num_online_cpus());
2518 else
2519 num_vec = adapter->cfg_num_qs;
2521 for (i = 0; i < num_vec; i++)
2522 adapter->msix_entries[i].entry = i;
2524 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2525 if (status == 0) {
2526 goto done;
2527 } else if (status >= MIN_MSIX_VECTORS) {
2528 num_vec = status;
2529 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2530 num_vec);
2531 if (!status)
2532 goto done;
2535 dev_warn(dev, "MSIx enable failed\n");
2537 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2538 if (!be_physfn(adapter))
2539 return status;
2540 return 0;
2541 done:
2542 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2543 adapter->num_msix_roce_vec = num_vec / 2;
2544 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2545 adapter->num_msix_roce_vec);
2548 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2550 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2551 adapter->num_msix_vec);
2552 return 0;
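/* Sketch of the retry semantics of the pci_enable_msix() API used
 * here (a positive return is the number of vectors actually
 * available), assuming 16 vectors requested but only 8 available:
 *
 *	status = pci_enable_msix(pdev, entries, 16);	// returns 8
 *	status = pci_enable_msix(pdev, entries, 8);	// returns 0
 *
 * A first return below MIN_MSIX_VECTORS is treated as failure.
 */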
2555 static inline int be_msix_vec_get(struct be_adapter *adapter,
2556 struct be_eq_obj *eqo)
2558 return adapter->msix_entries[eqo->msix_idx].vector;
2561 static int be_msix_register(struct be_adapter *adapter)
2563 struct net_device *netdev = adapter->netdev;
2564 struct be_eq_obj *eqo;
2565 int status, i, vec;
2567 for_all_evt_queues(adapter, eqo, i) {
2568 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2569 vec = be_msix_vec_get(adapter, eqo);
2570 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2571 if (status)
2572 goto err_msix;
2575 return 0;
2576 err_msix:
2577 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2578 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2579 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2580 status);
2581 be_msix_disable(adapter);
2582 return status;
2585 static int be_irq_register(struct be_adapter *adapter)
2587 struct net_device *netdev = adapter->netdev;
2588 int status;
2590 if (msix_enabled(adapter)) {
2591 status = be_msix_register(adapter);
2592 if (status == 0)
2593 goto done;
2594 /* INTx is not supported for VF */
2595 if (!be_physfn(adapter))
2596 return status;
2599 /* INTx: only the first EQ is used */
2600 netdev->irq = adapter->pdev->irq;
2601 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2602 &adapter->eq_obj[0]);
2603 if (status) {
2604 dev_err(&adapter->pdev->dev,
2605 "INTx request IRQ failed - err %d\n", status);
2606 return status;
2608 done:
2609 adapter->isr_registered = true;
2610 return 0;
2613 static void be_irq_unregister(struct be_adapter *adapter)
2615 struct net_device *netdev = adapter->netdev;
2616 struct be_eq_obj *eqo;
2617 int i;
2619 if (!adapter->isr_registered)
2620 return;
2622 /* INTx */
2623 if (!msix_enabled(adapter)) {
2624 free_irq(netdev->irq, &adapter->eq_obj[0]);
2625 goto done;
2628 /* MSIx */
2629 for_all_evt_queues(adapter, eqo, i)
2630 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2632 done:
2633 adapter->isr_registered = false;
2636 static void be_rx_qs_destroy(struct be_adapter *adapter)
2638 struct be_queue_info *q;
2639 struct be_rx_obj *rxo;
2640 int i;
2642 for_all_rx_queues(adapter, rxo, i) {
2643 q = &rxo->q;
2644 if (q->created) {
2645 be_cmd_rxq_destroy(adapter, q);
2646 be_rx_cq_clean(rxo);
2648 be_queue_free(adapter, q);
2652 static int be_close(struct net_device *netdev)
2654 struct be_adapter *adapter = netdev_priv(netdev);
2655 struct be_eq_obj *eqo;
2656 int i;
2658 be_roce_dev_close(adapter);
2660 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2661 for_all_evt_queues(adapter, eqo, i) {
2662 napi_disable(&eqo->napi);
2663 be_disable_busy_poll(eqo);
2665 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2668 be_async_mcc_disable(adapter);
2670 /* Wait for all pending tx completions to arrive so that
2671 * all tx skbs are freed.
2673 netif_tx_disable(netdev);
2674 be_tx_compl_clean(adapter);
2676 be_rx_qs_destroy(adapter);
2678 for (i = 1; i < (adapter->uc_macs + 1); i++)
2679 be_cmd_pmac_del(adapter, adapter->if_handle,
2680 adapter->pmac_id[i], 0);
2681 adapter->uc_macs = 0;
2683 for_all_evt_queues(adapter, eqo, i) {
2684 if (msix_enabled(adapter))
2685 synchronize_irq(be_msix_vec_get(adapter, eqo));
2686 else
2687 synchronize_irq(netdev->irq);
2688 be_eq_clean(eqo);
2691 be_irq_unregister(adapter);
2693 return 0;
2696 static int be_rx_qs_create(struct be_adapter *adapter)
2698 struct be_rx_obj *rxo;
2699 int rc, i, j;
2700 u8 rsstable[128];
2702 for_all_rx_queues(adapter, rxo, i) {
2703 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2704 sizeof(struct be_eth_rx_d));
2705 if (rc)
2706 return rc;
2709 /* The FW would like the default RXQ to be created first */
2710 rxo = default_rxo(adapter);
2711 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2712 adapter->if_handle, false, &rxo->rss_id);
2713 if (rc)
2714 return rc;
2716 for_all_rss_queues(adapter, rxo, i) {
2717 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2718 rx_frag_size, adapter->if_handle,
2719 true, &rxo->rss_id);
2720 if (rc)
2721 return rc;
2724 if (be_multi_rxq(adapter)) {
2725 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2726 for_all_rss_queues(adapter, rxo, i) {
2727 if ((j + i) >= 128)
2728 break;
2729 rsstable[j + i] = rxo->rss_id;
2732 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2733 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2735 if (!BEx_chip(adapter))
2736 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2737 RSS_ENABLE_UDP_IPV6;
2738 } else {
2739 /* Disable RSS if only the default RXQ is created */
2740 adapter->rss_flags = RSS_ENABLE_NONE;
2743 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2744 128);
2745 if (rc) {
2746 adapter->rss_flags = RSS_ENABLE_NONE;
2747 return rc;
2750 /* First time posting */
2751 for_all_rx_queues(adapter, rxo, i)
2752 be_post_rx_frags(rxo, GFP_KERNEL);
2753 return 0;
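/* For illustration, with three RSS rings the striping above fills the
 * 128-entry table as A B C A B C ..., i.e. roughly:
 *
 *	for (j = 0; j < 128; j++)
 *		rsstable[j] = ring_rss_id[j % 3];
 *
 * (ring_rss_id[] is a hypothetical per-ring rss_id array used only
 * for this sketch.) Flows therefore hash evenly across the RSS rings.
 */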
2756 static int be_open(struct net_device *netdev)
2758 struct be_adapter *adapter = netdev_priv(netdev);
2759 struct be_eq_obj *eqo;
2760 struct be_rx_obj *rxo;
2761 struct be_tx_obj *txo;
2762 u8 link_status;
2763 int status, i;
2765 status = be_rx_qs_create(adapter);
2766 if (status)
2767 goto err;
2769 status = be_irq_register(adapter);
2770 if (status)
2771 goto err;
2773 for_all_rx_queues(adapter, rxo, i)
2774 be_cq_notify(adapter, rxo->cq.id, true, 0);
2776 for_all_tx_queues(adapter, txo, i)
2777 be_cq_notify(adapter, txo->cq.id, true, 0);
2779 be_async_mcc_enable(adapter);
2781 for_all_evt_queues(adapter, eqo, i) {
2782 napi_enable(&eqo->napi);
2783 be_enable_busy_poll(eqo);
2784 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2786 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2788 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2789 if (!status)
2790 be_link_status_update(adapter, link_status);
2792 netif_tx_start_all_queues(netdev);
2793 be_roce_dev_open(adapter);
2794 return 0;
2795 err:
2796 be_close(adapter->netdev);
2797 return -EIO;
2800 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2802 struct be_dma_mem cmd;
2803 int status = 0;
2804 u8 mac[ETH_ALEN];
2806 memset(mac, 0, ETH_ALEN);
2808 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2809 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2810 GFP_KERNEL);
2811 if (cmd.va == NULL)
2812 return -1;
2814 if (enable) {
2815 status = pci_write_config_dword(adapter->pdev,
2816 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2817 if (status) {
2818 dev_err(&adapter->pdev->dev,
2819 "Could not enable Wake-on-lan\n");
2820 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2821 cmd.dma);
2822 return status;
2824 status = be_cmd_enable_magic_wol(adapter,
2825 adapter->netdev->dev_addr, &cmd);
2826 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2827 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2828 } else {
2829 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2830 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2831 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2834 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2835 return status;
2839 * Generate a seed MAC address from the PF MAC address using jhash.
2840 * MAC addresses for VFs are assigned incrementally starting from the seed.
2841 * These addresses are programmed in the ASIC by the PF and the VF driver
2842 * queries for the MAC address during its probe.
2844 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2846 u32 vf;
2847 int status = 0;
2848 u8 mac[ETH_ALEN];
2849 struct be_vf_cfg *vf_cfg;
2851 be_vf_eth_addr_generate(adapter, mac);
2853 for_all_vfs(adapter, vf_cfg, vf) {
2854 if (BEx_chip(adapter))
2855 status = be_cmd_pmac_add(adapter, mac,
2856 vf_cfg->if_handle,
2857 &vf_cfg->pmac_id, vf + 1);
2858 else
2859 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2860 vf + 1);
2862 if (status)
2863 dev_err(&adapter->pdev->dev,
2864 "Mac address assignment failed for VF %d\n", vf);
2865 else
2866 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2868 mac[5] += 1;
2870 return status;
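/* Only mac[5] (the last octet) is incremented, so one seed covers at
 * most 256 VFs before wrapping; e.g. a seed ending in :a0 assigns
 * ...:a0 to the first VF, ...:a1 to the second, and so on.
 */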
2873 static int be_vfs_mac_query(struct be_adapter *adapter)
2875 int status, vf;
2876 u8 mac[ETH_ALEN];
2877 struct be_vf_cfg *vf_cfg;
2879 for_all_vfs(adapter, vf_cfg, vf) {
2880 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2881 mac, vf_cfg->if_handle,
2882 false, vf+1);
2883 if (status)
2884 return status;
2885 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2887 return 0;
2890 static void be_vf_clear(struct be_adapter *adapter)
2892 struct be_vf_cfg *vf_cfg;
2893 u32 vf;
2895 if (pci_vfs_assigned(adapter->pdev)) {
2896 dev_warn(&adapter->pdev->dev,
2897 "VFs are assigned to VMs: not disabling VFs\n");
2898 goto done;
2901 pci_disable_sriov(adapter->pdev);
2903 for_all_vfs(adapter, vf_cfg, vf) {
2904 if (BEx_chip(adapter))
2905 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2906 vf_cfg->pmac_id, vf + 1);
2907 else
2908 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2909 vf + 1);
2911 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2913 done:
2914 kfree(adapter->vf_cfg);
2915 adapter->num_vfs = 0;
2918 static void be_clear_queues(struct be_adapter *adapter)
2920 be_mcc_queues_destroy(adapter);
2921 be_rx_cqs_destroy(adapter);
2922 be_tx_queues_destroy(adapter);
2923 be_evt_queues_destroy(adapter);
2926 static void be_cancel_worker(struct be_adapter *adapter)
2928 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2929 cancel_delayed_work_sync(&adapter->work);
2930 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2934 static void be_mac_clear(struct be_adapter *adapter)
2936 int i;
2938 if (adapter->pmac_id) {
2939 for (i = 0; i < (adapter->uc_macs + 1); i++)
2940 be_cmd_pmac_del(adapter, adapter->if_handle,
2941 adapter->pmac_id[i], 0);
2942 adapter->uc_macs = 0;
2944 kfree(adapter->pmac_id);
2945 adapter->pmac_id = NULL;
2949 static int be_clear(struct be_adapter *adapter)
2951 be_cancel_worker(adapter);
2953 if (sriov_enabled(adapter))
2954 be_vf_clear(adapter);
2956 /* delete the primary mac along with the uc-mac list */
2957 be_mac_clear(adapter);
2959 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2961 be_clear_queues(adapter);
2963 be_msix_disable(adapter);
2964 return 0;
2967 static int be_vfs_if_create(struct be_adapter *adapter)
2969 struct be_resources res = {0};
2970 struct be_vf_cfg *vf_cfg;
2971 u32 cap_flags, en_flags, vf;
2972 int status = 0;
2974 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2975 BE_IF_FLAGS_MULTICAST;
2977 for_all_vfs(adapter, vf_cfg, vf) {
2978 if (!BE3_chip(adapter)) {
2979 status = be_cmd_get_profile_config(adapter, &res,
2980 vf + 1);
2981 if (!status)
2982 cap_flags = res.if_cap_flags;
2985 /* If a FW profile exists, then cap_flags are updated */
2986 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2987 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2988 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2989 &vf_cfg->if_handle, vf + 1);
2990 if (status)
2991 goto err;
2993 err:
2994 return status;
2997 static int be_vf_setup_init(struct be_adapter *adapter)
2999 struct be_vf_cfg *vf_cfg;
3000 int vf;
3002 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3003 GFP_KERNEL);
3004 if (!adapter->vf_cfg)
3005 return -ENOMEM;
3007 for_all_vfs(adapter, vf_cfg, vf) {
3008 vf_cfg->if_handle = -1;
3009 vf_cfg->pmac_id = -1;
3011 return 0;
3014 static int be_vf_setup(struct be_adapter *adapter)
3016 struct be_vf_cfg *vf_cfg;
3017 u16 def_vlan, lnk_speed;
3018 int status, old_vfs, vf;
3019 struct device *dev = &adapter->pdev->dev;
3020 u32 privileges;
3022 old_vfs = pci_num_vf(adapter->pdev);
3023 if (old_vfs) {
3024 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3025 if (old_vfs != num_vfs)
3026 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3027 adapter->num_vfs = old_vfs;
3028 } else {
3029 if (num_vfs > be_max_vfs(adapter))
3030 dev_info(dev, "Device supports %d VFs and not %d\n",
3031 be_max_vfs(adapter), num_vfs);
3032 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3033 if (!adapter->num_vfs)
3034 return 0;
3037 status = be_vf_setup_init(adapter);
3038 if (status)
3039 goto err;
3041 if (old_vfs) {
3042 for_all_vfs(adapter, vf_cfg, vf) {
3043 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3044 if (status)
3045 goto err;
3047 } else {
3048 status = be_vfs_if_create(adapter);
3049 if (status)
3050 goto err;
3053 if (old_vfs) {
3054 status = be_vfs_mac_query(adapter);
3055 if (status)
3056 goto err;
3057 } else {
3058 status = be_vf_eth_addr_config(adapter);
3059 if (status)
3060 goto err;
3063 for_all_vfs(adapter, vf_cfg, vf) {
3064 /* Allow VFs to program MAC/VLAN filters */
3065 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3066 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3067 status = be_cmd_set_fn_privileges(adapter,
3068 privileges |
3069 BE_PRIV_FILTMGMT,
3070 vf + 1);
3071 if (!status)
3072 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3073 vf);
3076 /* BE3 FW, by default, caps VF TX-rate to 100Mbps.
3077 * Allow full available bandwidth
3079 if (BE3_chip(adapter) && !old_vfs)
3080 be_cmd_set_qos(adapter, 1000, vf+1);
3082 status = be_cmd_link_status_query(adapter, &lnk_speed,
3083 NULL, vf + 1);
3084 if (!status)
3085 vf_cfg->tx_rate = lnk_speed;
3087 status = be_cmd_get_hsw_config(adapter, &def_vlan,
3088 vf + 1, vf_cfg->if_handle, NULL);
3089 if (status)
3090 goto err;
3091 vf_cfg->def_vid = def_vlan;
3093 if (!old_vfs)
3094 be_cmd_enable_vf(adapter, vf + 1);
3097 if (!old_vfs) {
3098 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3099 if (status) {
3100 dev_err(dev, "SRIOV enable failed\n");
3101 adapter->num_vfs = 0;
3102 goto err;
3105 return 0;
3106 err:
3107 dev_err(dev, "VF setup failed\n");
3108 be_vf_clear(adapter);
3109 return status;
3112 /* On BE2/BE3, FW does not report the supported resource limits */
3113 static void BEx_get_resources(struct be_adapter *adapter,
3114 struct be_resources *res)
3116 struct pci_dev *pdev = adapter->pdev;
3117 bool use_sriov = false;
3118 int max_vfs;
3120 max_vfs = pci_sriov_get_totalvfs(pdev);
3122 if (BE3_chip(adapter) && sriov_want(adapter)) {
3123 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3124 use_sriov = res->max_vfs;
3127 if (be_physfn(adapter))
3128 res->max_uc_mac = BE_UC_PMAC_COUNT;
3129 else
3130 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3132 if (adapter->function_mode & FLEX10_MODE)
3133 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3134 else if (adapter->function_mode & UMC_ENABLED)
3135 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3136 else
3137 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3138 res->max_mcast_mac = BE_MAX_MC;
3140 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3141 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3142 !be_physfn(adapter) || (adapter->port_num > 1))
3143 res->max_tx_qs = 1;
3144 else
3145 res->max_tx_qs = BE3_MAX_TX_QS;
3147 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3148 !use_sriov && be_physfn(adapter))
3149 res->max_rss_qs = (adapter->be3_native) ?
3150 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3151 res->max_rx_qs = res->max_rss_qs + 1;
3153 if (be_physfn(adapter))
3154 res->max_evt_qs = (max_vfs > 0) ?
3155 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3156 else
3157 res->max_evt_qs = 1;
3159 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3160 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3161 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3164 static void be_setup_init(struct be_adapter *adapter)
3166 adapter->vlan_prio_bmap = 0xff;
3167 adapter->phy.link_speed = -1;
3168 adapter->if_handle = -1;
3169 adapter->be3_native = false;
3170 adapter->promiscuous = false;
3171 if (be_physfn(adapter))
3172 adapter->cmd_privileges = MAX_PRIVILEGES;
3173 else
3174 adapter->cmd_privileges = MIN_PRIVILEGES;
3177 static int be_get_resources(struct be_adapter *adapter)
3179 struct device *dev = &adapter->pdev->dev;
3180 struct be_resources res = {0};
3181 int status;
3183 if (BEx_chip(adapter)) {
3184 BEx_get_resources(adapter, &res);
3185 adapter->res = res;
3188 /* For Lancer, SH etc read per-function resource limits from FW.
3189 * GET_FUNC_CONFIG returns per function guaranteed limits.
3190 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits
3192 if (!BEx_chip(adapter)) {
3193 status = be_cmd_get_func_config(adapter, &res);
3194 if (status)
3195 return status;
3197 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3198 if (be_roce_supported(adapter))
3199 res.max_evt_qs /= 2;
3200 adapter->res = res;
3202 if (be_physfn(adapter)) {
3203 status = be_cmd_get_profile_config(adapter, &res, 0);
3204 if (status)
3205 return status;
3206 adapter->res.max_vfs = res.max_vfs;
3209 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3210 be_max_txqs(adapter), be_max_rxqs(adapter),
3211 be_max_rss(adapter), be_max_eqs(adapter),
3212 be_max_vfs(adapter));
3213 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3214 be_max_uc(adapter), be_max_mc(adapter),
3215 be_max_vlans(adapter));
3218 return 0;
3221 /* Routine to query per function resource limits */
3222 static int be_get_config(struct be_adapter *adapter)
3224 u16 profile_id;
3225 int status;
3227 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3228 &adapter->function_mode,
3229 &adapter->function_caps,
3230 &adapter->asic_rev);
3231 if (status)
3232 return status;
3234 if (be_physfn(adapter)) {
3235 status = be_cmd_get_active_profile(adapter, &profile_id);
3236 if (!status)
3237 dev_info(&adapter->pdev->dev,
3238 "Using profile 0x%x\n", profile_id);
3241 status = be_get_resources(adapter);
3242 if (status)
3243 return status;
3245 /* primary mac needs 1 pmac entry */
3246 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3247 GFP_KERNEL);
3248 if (!adapter->pmac_id)
3249 return -ENOMEM;
3251 /* Sanitize cfg_num_qs based on HW and platform limits */
3252 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3254 return 0;
3257 static int be_mac_setup(struct be_adapter *adapter)
3259 u8 mac[ETH_ALEN];
3260 int status;
3262 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3263 status = be_cmd_get_perm_mac(adapter, mac);
3264 if (status)
3265 return status;
3267 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3268 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3269 } else {
3270 /* Maybe the HW was reset; dev_addr must be re-programmed */
3271 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3274 /* For BE3-R VFs, the PF programs the initial MAC address */
3275 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3276 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3277 &adapter->pmac_id[0], 0);
3278 return 0;
3281 static void be_schedule_worker(struct be_adapter *adapter)
3283 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3284 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3287 static int be_setup_queues(struct be_adapter *adapter)
3289 struct net_device *netdev = adapter->netdev;
3290 int status;
3292 status = be_evt_queues_create(adapter);
3293 if (status)
3294 goto err;
3296 status = be_tx_qs_create(adapter);
3297 if (status)
3298 goto err;
3300 status = be_rx_cqs_create(adapter);
3301 if (status)
3302 goto err;
3304 status = be_mcc_queues_create(adapter);
3305 if (status)
3306 goto err;
3308 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3309 if (status)
3310 goto err;
3312 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3313 if (status)
3314 goto err;
3316 return 0;
3317 err:
3318 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3319 return status;
3322 int be_update_queues(struct be_adapter *adapter)
3324 struct net_device *netdev = adapter->netdev;
3325 int status;
3327 if (netif_running(netdev))
3328 be_close(netdev);
3330 be_cancel_worker(adapter);
3332 /* If any vectors have been shared with RoCE, we cannot re-program
3333 * the MSIx table.
3335 if (!adapter->num_msix_roce_vec)
3336 be_msix_disable(adapter);
3338 be_clear_queues(adapter);
3340 if (!msix_enabled(adapter)) {
3341 status = be_msix_enable(adapter);
3342 if (status)
3343 return status;
3346 status = be_setup_queues(adapter);
3347 if (status)
3348 return status;
3350 be_schedule_worker(adapter);
3352 if (netif_running(netdev))
3353 status = be_open(netdev);
3355 return status;
3358 static int be_setup(struct be_adapter *adapter)
3360 struct device *dev = &adapter->pdev->dev;
3361 u32 tx_fc, rx_fc, en_flags;
3362 int status;
3364 be_setup_init(adapter);
3366 if (!lancer_chip(adapter))
3367 be_cmd_req_native_mode(adapter);
3369 status = be_get_config(adapter);
3370 if (status)
3371 goto err;
3373 status = be_msix_enable(adapter);
3374 if (status)
3375 goto err;
3377 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3378 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3379 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3380 en_flags |= BE_IF_FLAGS_RSS;
3381 en_flags = en_flags & be_if_cap_flags(adapter);
3382 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3383 &adapter->if_handle, 0);
3384 if (status)
3385 goto err;
3387 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3388 rtnl_lock();
3389 status = be_setup_queues(adapter);
3390 rtnl_unlock();
3391 if (status)
3392 goto err;
3394 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3396 status = be_mac_setup(adapter);
3397 if (status)
3398 goto err;
3400 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3402 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3403 dev_err(dev, "Firmware on card is old (%s), IRQs may not work.",
3404 adapter->fw_ver);
3405 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3408 if (adapter->vlans_added)
3409 be_vid_config(adapter);
3411 be_set_rx_mode(adapter->netdev);
3413 be_cmd_get_acpi_wol_cap(adapter);
3415 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3417 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3418 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3419 adapter->rx_fc);
3421 if (sriov_want(adapter)) {
3422 if (be_max_vfs(adapter))
3423 be_vf_setup(adapter);
3424 else
3425 dev_warn(dev, "device doesn't support SRIOV\n");
3428 status = be_cmd_get_phy_info(adapter);
3429 if (!status && be_pause_supported(adapter))
3430 adapter->phy.fc_autoneg = 1;
3432 be_schedule_worker(adapter);
3433 return 0;
3434 err:
3435 be_clear(adapter);
3436 return status;
3439 #ifdef CONFIG_NET_POLL_CONTROLLER
3440 static void be_netpoll(struct net_device *netdev)
3442 struct be_adapter *adapter = netdev_priv(netdev);
3443 struct be_eq_obj *eqo;
3444 int i;
3446 for_all_evt_queues(adapter, eqo, i) {
3447 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3448 napi_schedule(&eqo->napi);
3451 return;
3453 #endif
3455 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3456 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3458 static bool be_flash_redboot(struct be_adapter *adapter,
3459 const u8 *p, u32 img_start, int image_size,
3460 int hdr_size)
3462 u32 crc_offset;
3463 u8 flashed_crc[4];
3464 int status;
3466 crc_offset = hdr_size + img_start + image_size - 4;
3468 p += crc_offset;
3470 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3471 (image_size - 4));
3472 if (status) {
3473 dev_err(&adapter->pdev->dev,
3474 "could not get crc from flash, not flashing redboot\n");
3475 return false;
3478 /* Update redboot only if the CRC does not match */
3479 if (!memcmp(flashed_crc, p, 4))
3480 return false;
3481 else
3482 return true;
3485 static bool phy_flashing_required(struct be_adapter *adapter)
3487 return (adapter->phy.phy_type == TN_8022 &&
3488 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3491 static bool is_comp_in_ufi(struct be_adapter *adapter,
3492 struct flash_section_info *fsec, int type)
3494 int i = 0, img_type = 0;
3495 struct flash_section_info_g2 *fsec_g2 = NULL;
3497 if (BE2_chip(adapter))
3498 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3500 for (i = 0; i < MAX_FLASH_COMP; i++) {
3501 if (fsec_g2)
3502 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3503 else
3504 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3506 if (img_type == type)
3507 return true;
3509 return false;
3513 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3514 int header_size,
3515 const struct firmware *fw)
3517 struct flash_section_info *fsec = NULL;
3518 const u8 *p = fw->data;
3520 p += header_size;
3521 while (p < (fw->data + fw->size)) {
3522 fsec = (struct flash_section_info *)p;
3523 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3524 return fsec;
3525 p += 32;
3527 return NULL;
3530 static int be_flash(struct be_adapter *adapter, const u8 *img,
3531 struct be_dma_mem *flash_cmd, int optype, int img_size)
3533 u32 total_bytes = 0, flash_op, num_bytes = 0;
3534 int status = 0;
3535 struct be_cmd_write_flashrom *req = flash_cmd->va;
3537 total_bytes = img_size;
3538 while (total_bytes) {
3539 num_bytes = min_t(u32, 32*1024, total_bytes);
3541 total_bytes -= num_bytes;
3543 if (!total_bytes) {
3544 if (optype == OPTYPE_PHY_FW)
3545 flash_op = FLASHROM_OPER_PHY_FLASH;
3546 else
3547 flash_op = FLASHROM_OPER_FLASH;
3548 } else {
3549 if (optype == OPTYPE_PHY_FW)
3550 flash_op = FLASHROM_OPER_PHY_SAVE;
3551 else
3552 flash_op = FLASHROM_OPER_SAVE;
3555 memcpy(req->data_buf, img, num_bytes);
3556 img += num_bytes;
3557 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3558 flash_op, num_bytes);
3559 if (status) {
3560 if (status == ILLEGAL_IOCTL_REQ &&
3561 optype == OPTYPE_PHY_FW)
3562 break;
3563 dev_err(&adapter->pdev->dev,
3564 "cmd to write to flash rom failed.\n");
3565 return status;
3568 return 0;
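/* Chunking example: the image is pushed in 32KB pieces; every chunk
 * but the last uses a SAVE opcode (buffered by the adapter) and the
 * final chunk uses a FLASH opcode, which triggers the actual burn.
 * A 100KB image is thus written as four commands:
 *
 *	32KB FLASHROM_OPER_SAVE
 *	32KB FLASHROM_OPER_SAVE
 *	32KB FLASHROM_OPER_SAVE
 *	 4KB FLASHROM_OPER_FLASH
 */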
3571 /* For BE2, BE3 and BE3-R */
3572 static int be_flash_BEx(struct be_adapter *adapter,
3573 const struct firmware *fw,
3574 struct be_dma_mem *flash_cmd,
3575 int num_of_images)
3578 int status = 0, i, filehdr_size = 0;
3579 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3580 const u8 *p = fw->data;
3581 const struct flash_comp *pflashcomp;
3582 int num_comp, redboot;
3583 struct flash_section_info *fsec = NULL;
3585 struct flash_comp gen3_flash_types[] = {
3586 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3587 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3588 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3589 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3590 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3591 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3592 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3593 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3594 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3595 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3596 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3597 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3598 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3599 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3600 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3601 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3602 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3603 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3604 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3605 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3608 struct flash_comp gen2_flash_types[] = {
3609 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3610 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3611 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3612 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3613 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3614 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3615 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3616 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3617 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3618 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3619 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3620 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3621 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3622 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3623 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3624 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3627 if (BE3_chip(adapter)) {
3628 pflashcomp = gen3_flash_types;
3629 filehdr_size = sizeof(struct flash_file_hdr_g3);
3630 num_comp = ARRAY_SIZE(gen3_flash_types);
3631 } else {
3632 pflashcomp = gen2_flash_types;
3633 filehdr_size = sizeof(struct flash_file_hdr_g2);
3634 num_comp = ARRAY_SIZE(gen2_flash_types);
3637 /* Get flash section info */
3638 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3639 if (!fsec) {
3640 dev_err(&adapter->pdev->dev,
3641 "Invalid Cookie. UFI corrupted ?\n");
3642 return -1;
3644 for (i = 0; i < num_comp; i++) {
3645 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3646 continue;
3648 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3649 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3650 continue;
3652 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3653 !phy_flashing_required(adapter))
3654 continue;
3656 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3657 redboot = be_flash_redboot(adapter, fw->data,
3658 pflashcomp[i].offset, pflashcomp[i].size,
3659 filehdr_size + img_hdrs_size);
3660 if (!redboot)
3661 continue;
3664 p = fw->data;
3665 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3666 if (p + pflashcomp[i].size > fw->data + fw->size)
3667 return -1;
3669 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3670 pflashcomp[i].size);
3671 if (status) {
3672 dev_err(&adapter->pdev->dev,
3673 "Flashing section type %d failed.\n",
3674 pflashcomp[i].img_type);
3675 return status;
3678 return 0;
3681 static int be_flash_skyhawk(struct be_adapter *adapter,
3682 const struct firmware *fw,
3683 struct be_dma_mem *flash_cmd, int num_of_images)
3685 int status = 0, i, filehdr_size = 0;
3686 int img_offset, img_size, img_optype, redboot;
3687 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3688 const u8 *p = fw->data;
3689 struct flash_section_info *fsec = NULL;
3691 filehdr_size = sizeof(struct flash_file_hdr_g3);
3692 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3693 if (!fsec) {
3694 dev_err(&adapter->pdev->dev,
3695 "Invalid Cookie. UFI corrupted ?\n");
3696 return -1;
3699 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3700 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3701 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3703 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3704 case IMAGE_FIRMWARE_iSCSI:
3705 img_optype = OPTYPE_ISCSI_ACTIVE;
3706 break;
3707 case IMAGE_BOOT_CODE:
3708 img_optype = OPTYPE_REDBOOT;
3709 break;
3710 case IMAGE_OPTION_ROM_ISCSI:
3711 img_optype = OPTYPE_BIOS;
3712 break;
3713 case IMAGE_OPTION_ROM_PXE:
3714 img_optype = OPTYPE_PXE_BIOS;
3715 break;
3716 case IMAGE_OPTION_ROM_FCoE:
3717 img_optype = OPTYPE_FCOE_BIOS;
3718 break;
3719 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3720 img_optype = OPTYPE_ISCSI_BACKUP;
3721 break;
3722 case IMAGE_NCSI:
3723 img_optype = OPTYPE_NCSI_FW;
3724 break;
3725 default:
3726 continue;
3729 if (img_optype == OPTYPE_REDBOOT) {
3730 redboot = be_flash_redboot(adapter, fw->data,
3731 img_offset, img_size,
3732 filehdr_size + img_hdrs_size);
3733 if (!redboot)
3734 continue;
3737 p = fw->data;
3738 p += filehdr_size + img_offset + img_hdrs_size;
3739 if (p + img_size > fw->data + fw->size)
3740 return -1;
3742 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3743 if (status) {
3744 dev_err(&adapter->pdev->dev,
3745 "Flashing section type %d failed.\n",
3746 fsec->fsec_entry[i].type);
3747 return status;
3750 return 0;
3753 static int lancer_fw_download(struct be_adapter *adapter,
3754 const struct firmware *fw)
3756 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3757 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3758 struct be_dma_mem flash_cmd;
3759 const u8 *data_ptr = NULL;
3760 u8 *dest_image_ptr = NULL;
3761 size_t image_size = 0;
3762 u32 chunk_size = 0;
3763 u32 data_written = 0;
3764 u32 offset = 0;
3765 int status = 0;
3766 u8 add_status = 0;
3767 u8 change_status;
3769 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3770 dev_err(&adapter->pdev->dev,
3771 "FW Image not properly aligned. "
3772 "Length must be 4 byte aligned.\n");
3773 status = -EINVAL;
3774 goto lancer_fw_exit;
3777 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3778 + LANCER_FW_DOWNLOAD_CHUNK;
3779 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3780 &flash_cmd.dma, GFP_KERNEL);
3781 if (!flash_cmd.va) {
3782 status = -ENOMEM;
3783 goto lancer_fw_exit;
3786 dest_image_ptr = flash_cmd.va +
3787 sizeof(struct lancer_cmd_req_write_object);
3788 image_size = fw->size;
3789 data_ptr = fw->data;
3791 while (image_size) {
3792 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3794 /* Copy the image chunk content. */
3795 memcpy(dest_image_ptr, data_ptr, chunk_size);
3797 status = lancer_cmd_write_object(adapter, &flash_cmd,
3798 chunk_size, offset,
3799 LANCER_FW_DOWNLOAD_LOCATION,
3800 &data_written, &change_status,
3801 &add_status);
3802 if (status)
3803 break;
3805 offset += data_written;
3806 data_ptr += data_written;
3807 image_size -= data_written;
3810 if (!status) {
3811 /* Commit the FW written */
3812 status = lancer_cmd_write_object(adapter, &flash_cmd,
3813 0, offset,
3814 LANCER_FW_DOWNLOAD_LOCATION,
3815 &data_written, &change_status,
3816 &add_status);
3819 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3820 flash_cmd.dma);
3821 if (status) {
3822 dev_err(&adapter->pdev->dev,
3823 "Firmware load error. "
3824 "Status code: 0x%x Additional Status: 0x%x\n",
3825 status, add_status);
3826 goto lancer_fw_exit;
3829 if (change_status == LANCER_FW_RESET_NEEDED) {
3830 dev_info(&adapter->pdev->dev,
3831 "Resetting adapter to activate new FW\n");
3832 status = lancer_physdev_ctrl(adapter,
3833 PHYSDEV_CONTROL_FW_RESET_MASK);
3834 if (status) {
3835 dev_err(&adapter->pdev->dev,
3836 "Adapter busy for FW reset.\n"
3837 "New FW will not be active.\n");
3838 goto lancer_fw_exit;
3840 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3841 dev_err(&adapter->pdev->dev,
3842 "System reboot required for new FW"
3843 " to be active\n");
3846 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3847 lancer_fw_exit:
3848 return status;
3851 #define UFI_TYPE2 2
3852 #define UFI_TYPE3 3
3853 #define UFI_TYPE3R 10
3854 #define UFI_TYPE4 4
3855 static int be_get_ufi_type(struct be_adapter *adapter,
3856 struct flash_file_hdr_g3 *fhdr)
3858 if (fhdr == NULL)
3859 goto be_get_ufi_exit;
3861 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3862 return UFI_TYPE4;
3863 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3864 if (fhdr->asic_type_rev == 0x10)
3865 return UFI_TYPE3R;
3866 else
3867 return UFI_TYPE3;
3868 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3869 return UFI_TYPE2;
3871 be_get_ufi_exit:
3872 dev_err(&adapter->pdev->dev,
3873 "UFI and Interface are not compatible for flashing\n");
3874 return -1;
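/* Compatibility matrix: build[0] '4' -> Skyhawk (UFI_TYPE4);
 * '3' -> BE3, split into UFI_TYPE3R when asic_type_rev is 0x10
 * (BE3-R) and UFI_TYPE3 otherwise; '2' -> BE2 (UFI_TYPE2). Any other
 * pairing is rejected.
 */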
3877 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3879 struct flash_file_hdr_g3 *fhdr3;
3880 struct image_hdr *img_hdr_ptr = NULL;
3881 struct be_dma_mem flash_cmd;
3882 const u8 *p;
3883 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3885 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3886 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3887 &flash_cmd.dma, GFP_KERNEL);
3888 if (!flash_cmd.va) {
3889 status = -ENOMEM;
3890 goto be_fw_exit;
3893 p = fw->data;
3894 fhdr3 = (struct flash_file_hdr_g3 *)p;
3896 ufi_type = be_get_ufi_type(adapter, fhdr3);
3898 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3899 for (i = 0; i < num_imgs; i++) {
3900 img_hdr_ptr = (struct image_hdr *)(fw->data +
3901 (sizeof(struct flash_file_hdr_g3) +
3902 i * sizeof(struct image_hdr)));
3903 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3904 switch (ufi_type) {
3905 case UFI_TYPE4:
3906 status = be_flash_skyhawk(adapter, fw,
3907 &flash_cmd, num_imgs);
3908 break;
3909 case UFI_TYPE3R:
3910 status = be_flash_BEx(adapter, fw, &flash_cmd,
3911 num_imgs);
3912 break;
3913 case UFI_TYPE3:
3914 /* Do not flash this UFI on BE3-R cards */
3915 if (adapter->asic_rev < 0x10)
3916 status = be_flash_BEx(adapter, fw,
3917 &flash_cmd,
3918 num_imgs);
3919 else {
3920 status = -1;
3921 dev_err(&adapter->pdev->dev,
3922 "Can't load BE3 UFI on BE3R\n");
3928 if (ufi_type == UFI_TYPE2)
3929 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3930 else if (ufi_type == -1)
3931 status = -1;
3933 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3934 flash_cmd.dma);
3935 if (status) {
3936 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3937 goto be_fw_exit;
3940 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3942 be_fw_exit:
3943 return status;
3946 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3948 const struct firmware *fw;
3949 int status;
3951 if (!netif_running(adapter->netdev)) {
3952 dev_err(&adapter->pdev->dev,
3953 "Firmware load not allowed (interface is down)\n");
3954 return -1;
3957 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3958 if (status)
3959 goto fw_exit;
3961 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3963 if (lancer_chip(adapter))
3964 status = lancer_fw_download(adapter, fw);
3965 else
3966 status = be_fw_download(adapter, fw);
3968 if (!status)
3969 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3970 adapter->fw_on_flash);
3972 fw_exit:
3973 release_firmware(fw);
3974 return status;
3977 static int be_ndo_bridge_setlink(struct net_device *dev,
3978 struct nlmsghdr *nlh)
3980 struct be_adapter *adapter = netdev_priv(dev);
3981 struct nlattr *attr, *br_spec;
3982 int rem;
3983 int status = 0;
3984 u16 mode = 0;
3986 if (!sriov_enabled(adapter))
3987 return -EOPNOTSUPP;
3989 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3991 nla_for_each_nested(attr, br_spec, rem) {
3992 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3993 continue;
3995 mode = nla_get_u16(attr);
3996 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3997 return -EINVAL;
3999 status = be_cmd_set_hsw_config(adapter, 0, 0,
4000 adapter->if_handle,
4001 mode == BRIDGE_MODE_VEPA ?
4002 PORT_FWD_TYPE_VEPA :
4003 PORT_FWD_TYPE_VEB);
4004 if (status)
4005 goto err;
4007 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4008 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4010 return status;
4011 }
4012 err:
4013 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4014 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4016 return status;
4017 }
4019 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4020 struct net_device *dev,
4021 u32 filter_mask)
4022 {
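/* Reports the current e-switch forwarding mode. ndo_dflt_bridge_getlink()
 * takes care of building the RTM_NEWLINK reply, so this callback only has
 * to resolve hsw_mode for the interface.
 */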
4023 struct be_adapter *adapter = netdev_priv(dev);
4024 int status = 0;
4025 u8 hsw_mode;
4027 if (!sriov_enabled(adapter))
4028 return 0;
4030 /* BE and Lancer chips support VEB mode only */
4031 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4032 hsw_mode = PORT_FWD_TYPE_VEB;
4033 } else {
4034 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4035 adapter->if_handle, &hsw_mode);
4036 if (status)
4037 return 0;
4038 }
4040 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4041 hsw_mode == PORT_FWD_TYPE_VEPA ?
4042 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4043 }
4045 static const struct net_device_ops be_netdev_ops = {
4046 .ndo_open = be_open,
4047 .ndo_stop = be_close,
4048 .ndo_start_xmit = be_xmit,
4049 .ndo_set_rx_mode = be_set_rx_mode,
4050 .ndo_set_mac_address = be_mac_addr_set,
4051 .ndo_change_mtu = be_change_mtu,
4052 .ndo_get_stats64 = be_get_stats64,
4053 .ndo_validate_addr = eth_validate_addr,
4054 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4055 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
4056 .ndo_set_vf_mac = be_set_vf_mac,
4057 .ndo_set_vf_vlan = be_set_vf_vlan,
4058 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
4059 .ndo_get_vf_config = be_get_vf_config,
4060 #ifdef CONFIG_NET_POLL_CONTROLLER
4061 .ndo_poll_controller = be_netpoll,
4062 #endif
4063 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4064 .ndo_bridge_getlink = be_ndo_bridge_getlink,
4065 #ifdef CONFIG_NET_RX_BUSY_POLL
4066 .ndo_busy_poll = be_busy_poll
4067 #endif
4068 };
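/* The ndo_busy_poll hook above is only compiled in with
 * CONFIG_NET_RX_BUSY_POLL and is exercised when an application opts in,
 * e.g. (userspace sketch, error handling omitted):
 *
 *   unsigned int usecs = 50;
 *   setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
 */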
4070 static void be_netdev_init(struct net_device *netdev)
4071 {
4072 struct be_adapter *adapter = netdev_priv(netdev);
4074 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4075 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4076 NETIF_F_HW_VLAN_CTAG_TX;
4077 if (be_multi_rxq(adapter))
4078 netdev->hw_features |= NETIF_F_RXHASH;
4080 netdev->features |= netdev->hw_features |
4081 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4083 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4084 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4086 netdev->priv_flags |= IFF_UNICAST_FLT;
4088 netdev->flags |= IFF_MULTICAST;
4090 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4092 netdev->netdev_ops = &be_netdev_ops;
4094 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4095 }
4097 static void be_unmap_pci_bars(struct be_adapter *adapter)
4098 {
4099 if (adapter->csr)
4100 pci_iounmap(adapter->pdev, adapter->csr);
4101 if (adapter->db)
4102 pci_iounmap(adapter->pdev, adapter->db);
4103 }
4105 static int db_bar(struct be_adapter *adapter)
4106 {
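/* Doorbell BAR selection: Lancer and all VFs expose their doorbells in
 * BAR 0, while BE2/BE3/Skyhawk PFs use BAR 4, as encoded below.
 */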
4107 if (lancer_chip(adapter) || !be_physfn(adapter))
4108 return 0;
4109 else
4110 return 4;
4111 }
4113 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4114 {
4115 if (skyhawk_chip(adapter)) {
4116 adapter->roce_db.size = 4096;
4117 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4118 db_bar(adapter));
4119 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4120 db_bar(adapter));
4121 }
4122 return 0;
4123 }
4125 static int be_map_pci_bars(struct be_adapter *adapter)
4126 {
4127 u8 __iomem *addr;
4129 if (BEx_chip(adapter) && be_physfn(adapter)) {
4130 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4131 if (adapter->csr == NULL)
4132 return -ENOMEM;
4133 }
4135 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4136 if (addr == NULL)
4137 goto pci_map_err;
4138 adapter->db = addr;
4140 be_roce_map_pci_bars(adapter);
4141 return 0;
4143 pci_map_err:
4144 be_unmap_pci_bars(adapter);
4145 return -ENOMEM;
4146 }
4148 static void be_ctrl_cleanup(struct be_adapter *adapter)
4149 {
4150 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4152 be_unmap_pci_bars(adapter);
4154 if (mem->va)
4155 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4156 mem->dma);
4158 mem = &adapter->rx_filter;
4159 if (mem->va)
4160 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4161 mem->dma);
4162 }
4164 static int be_ctrl_init(struct be_adapter *adapter)
4165 {
4166 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4167 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4168 struct be_dma_mem *rx_filter = &adapter->rx_filter;
4169 u32 sli_intf;
4170 int status;
4172 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4173 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4174 SLI_INTF_FAMILY_SHIFT;
4175 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4177 status = be_map_pci_bars(adapter);
4178 if (status)
4179 goto done;
4181 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4182 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4183 mbox_mem_alloc->size,
4184 &mbox_mem_alloc->dma,
4185 GFP_KERNEL);
4186 if (!mbox_mem_alloc->va) {
4187 status = -ENOMEM;
4188 goto unmap_pci_bars;
4189 }
4190 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4191 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4192 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4193 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
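/* The mailbox must start on a 16-byte boundary, hence the 16 spare bytes
 * in the allocation above. Worked example of PTR_ALIGN(va, 16): an
 * address of 0x1009 becomes (0x1009 + 15) & ~15 = 0x1010.
 */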
4195 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4196 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4197 rx_filter->size, &rx_filter->dma,
4198 GFP_KERNEL);
4199 if (rx_filter->va == NULL) {
4200 status = -ENOMEM;
4201 goto free_mbox;
4202 }
4204 mutex_init(&adapter->mbox_lock);
4205 spin_lock_init(&adapter->mcc_lock);
4206 spin_lock_init(&adapter->mcc_cq_lock);
4208 init_completion(&adapter->et_cmd_compl);
4209 pci_save_state(adapter->pdev);
4210 return 0;
4212 free_mbox:
4213 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4214 mbox_mem_alloc->va, mbox_mem_alloc->dma);
4216 unmap_pci_bars:
4217 be_unmap_pci_bars(adapter);
4219 done:
4220 return status;
4221 }
4223 static void be_stats_cleanup(struct be_adapter *adapter)
4224 {
4225 struct be_dma_mem *cmd = &adapter->stats_cmd;
4227 if (cmd->va)
4228 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4229 cmd->va, cmd->dma);
4230 }
4232 static int be_stats_init(struct be_adapter *adapter)
4233 {
4234 struct be_dma_mem *cmd = &adapter->stats_cmd;
4236 if (lancer_chip(adapter))
4237 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4238 else if (BE2_chip(adapter))
4239 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4240 else if (BE3_chip(adapter))
4241 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4242 else
4243 /* ALL non-BE ASICs */
4244 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4246 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4247 GFP_KERNEL);
4248 if (cmd->va == NULL)
4249 return -1;
4250 return 0;
4251 }
4253 static void be_remove(struct pci_dev *pdev)
4254 {
4255 struct be_adapter *adapter = pci_get_drvdata(pdev);
4257 if (!adapter)
4258 return;
4260 be_roce_dev_remove(adapter);
4261 be_intr_set(adapter, false);
4263 cancel_delayed_work_sync(&adapter->func_recovery_work);
4265 unregister_netdev(adapter->netdev);
4267 be_clear(adapter);
4269 /* tell fw we're done with firing cmds */
4270 be_cmd_fw_clean(adapter);
4272 be_stats_cleanup(adapter);
4274 be_ctrl_cleanup(adapter);
4276 pci_disable_pcie_error_reporting(pdev);
4278 pci_release_regions(pdev);
4279 pci_disable_device(pdev);
4281 free_netdev(adapter->netdev);
4282 }
4284 static int be_get_initial_config(struct be_adapter *adapter)
4285 {
4286 int status, level;
4288 status = be_cmd_get_cntl_attributes(adapter);
4289 if (status)
4290 return status;
4292 /* Must be a power of 2 or else MODULO will BUG_ON */
4293 adapter->be_get_temp_freq = 64;
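/* MODULO() in be.h masks with (limit - 1), which is why the limit must be
 * a power of 2: e.g. MODULO(130, 64) = 130 & 63 = 2. With be_worker()
 * running at 1 Hz, the temperature query fires roughly once per 64s.
 */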
4295 if (BEx_chip(adapter)) {
4296 level = be_cmd_get_fw_log_level(adapter);
4297 adapter->msg_enable =
4298 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4299 }
4301 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4302 return 0;
4303 }
4305 static int lancer_recover_func(struct be_adapter *adapter)
4306 {
4307 struct device *dev = &adapter->pdev->dev;
4308 int status;
4310 status = lancer_test_and_set_rdy_state(adapter);
4311 if (status)
4312 goto err;
4314 if (netif_running(adapter->netdev))
4315 be_close(adapter->netdev);
4317 be_clear(adapter);
4319 be_clear_all_error(adapter);
4321 status = be_setup(adapter);
4322 if (status)
4323 goto err;
4325 if (netif_running(adapter->netdev)) {
4326 status = be_open(adapter->netdev);
4327 if (status)
4328 goto err;
4329 }
4331 dev_info(dev, "Adapter recovery successful\n");
4332 return 0;
4333 err:
4334 if (status == -EAGAIN)
4335 dev_err(dev, "Waiting for resource provisioning\n");
4336 else
4337 dev_err(dev, "Adapter recovery failed\n");
4339 return status;
4340 }
4342 static void be_func_recovery_task(struct work_struct *work)
4343 {
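/* Polled once a second; only Lancer has a software-driven recovery path.
 * On a detected firmware error the netdev is detached, the function is
 * re-initialized via lancer_recover_func(), then reattached.
 */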
4344 struct be_adapter *adapter =
4345 container_of(work, struct be_adapter, func_recovery_work.work);
4346 int status = 0;
4348 be_detect_error(adapter);
4350 if (adapter->hw_error && lancer_chip(adapter)) {
4352 rtnl_lock();
4353 netif_device_detach(adapter->netdev);
4354 rtnl_unlock();
4356 status = lancer_recover_func(adapter);
4357 if (!status)
4358 netif_device_attach(adapter->netdev);
4359 }
4361 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4362 * no need to attempt further recovery.
4363 */
4364 if (!status || status == -EAGAIN)
4365 schedule_delayed_work(&adapter->func_recovery_work,
4366 msecs_to_jiffies(1000));
4367 }
4369 static void be_worker(struct work_struct *work)
4370 {
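/* 1 Hz housekeeping: reap MCC completions while the interface is down,
 * refresh stats, sample the die temperature every 64th pass, and
 * replenish any RX rings that starved under memory pressure.
 */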
4371 struct be_adapter *adapter =
4372 container_of(work, struct be_adapter, work.work);
4373 struct be_rx_obj *rxo;
4374 int i;
4376 /* when interrupts are not yet enabled, just reap any pending
4377 * mcc completions */
4378 if (!netif_running(adapter->netdev)) {
4379 local_bh_disable();
4380 be_process_mcc(adapter);
4381 local_bh_enable();
4382 goto reschedule;
4383 }
4385 if (!adapter->stats_cmd_sent) {
4386 if (lancer_chip(adapter))
4387 lancer_cmd_get_pport_stats(adapter,
4388 &adapter->stats_cmd);
4389 else
4390 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4391 }
4393 if (be_physfn(adapter) &&
4394 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4395 be_cmd_get_die_temperature(adapter);
4397 for_all_rx_queues(adapter, rxo, i) {
4398 /* Replenish RX-queues starved due to memory
4399 * allocation failures.
4400 */
4401 if (rxo->rx_post_starved)
4402 be_post_rx_frags(rxo, GFP_KERNEL);
4403 }
4405 be_eqd_update(adapter);
4407 reschedule:
4408 adapter->work_counter++;
4409 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4410 }
4412 /* If any VFs are already enabled, don't FLR the PF */
4413 static bool be_reset_required(struct be_adapter *adapter)
4414 {
4415 return pci_num_vf(adapter->pdev) ? false : true;
4416 }
4418 static char *mc_name(struct be_adapter *adapter)
4419 {
4420 if (adapter->function_mode & FLEX10_MODE)
4421 return "FLEX10";
4422 else if (adapter->function_mode & VNIC_MODE)
4423 return "vNIC";
4424 else if (adapter->function_mode & UMC_ENABLED)
4425 return "UMC";
4426 else
4427 return "";
4428 }
4430 static inline char *func_name(struct be_adapter *adapter)
4431 {
4432 return be_physfn(adapter) ? "PF" : "VF";
4433 }
4435 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4436 {
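/* Bring-up order matters here: PCI enable -> BAR/mailbox setup
 * (be_ctrl_init) -> FW readiness -> optional function reset -> stats ->
 * be_setup -> register_netdev. The error labels at the bottom unwind
 * exactly the steps completed so far, in reverse.
 */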
4437 int status = 0;
4438 struct be_adapter *adapter;
4439 struct net_device *netdev;
4440 char port_name;
4442 status = pci_enable_device(pdev);
4443 if (status)
4444 goto do_none;
4446 status = pci_request_regions(pdev, DRV_NAME);
4447 if (status)
4448 goto disable_dev;
4449 pci_set_master(pdev);
4451 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4452 if (netdev == NULL) {
4453 status = -ENOMEM;
4454 goto rel_reg;
4455 }
4456 adapter = netdev_priv(netdev);
4457 adapter->pdev = pdev;
4458 pci_set_drvdata(pdev, adapter);
4459 adapter->netdev = netdev;
4460 SET_NETDEV_DEV(netdev, &pdev->dev);
4462 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4463 if (!status) {
4464 netdev->features |= NETIF_F_HIGHDMA;
4465 } else {
4466 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4467 if (status) {
4468 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4469 goto free_netdev;
4470 }
4471 }
4473 if (be_physfn(adapter)) {
4474 status = pci_enable_pcie_error_reporting(pdev);
4475 if (!status)
4476 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4477 }
4479 status = be_ctrl_init(adapter);
4480 if (status)
4481 goto free_netdev;
4483 /* sync up with fw's ready state */
4484 if (be_physfn(adapter)) {
4485 status = be_fw_wait_ready(adapter);
4486 if (status)
4487 goto ctrl_clean;
4488 }
4490 if (be_reset_required(adapter)) {
4491 status = be_cmd_reset_function(adapter);
4492 if (status)
4493 goto ctrl_clean;
4495 /* Wait for interrupts to quiesce after an FLR */
4496 msleep(100);
4497 }
4499 /* Allow interrupts for other ULPs running on NIC function */
4500 be_intr_set(adapter, true);
4502 /* tell fw we're ready to fire cmds */
4503 status = be_cmd_fw_init(adapter);
4504 if (status)
4505 goto ctrl_clean;
4507 status = be_stats_init(adapter);
4508 if (status)
4509 goto ctrl_clean;
4511 status = be_get_initial_config(adapter);
4512 if (status)
4513 goto stats_clean;
4515 INIT_DELAYED_WORK(&adapter->work, be_worker);
4516 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4517 adapter->rx_fc = adapter->tx_fc = true;
4519 status = be_setup(adapter);
4520 if (status)
4521 goto stats_clean;
4523 be_netdev_init(netdev);
4524 status = register_netdev(netdev);
4525 if (status != 0)
4526 goto unsetup;
4528 be_roce_dev_add(adapter);
4530 schedule_delayed_work(&adapter->func_recovery_work,
4531 msecs_to_jiffies(1000));
4533 be_cmd_query_port_name(adapter, &port_name);
4535 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4536 func_name(adapter), mc_name(adapter), port_name);
4538 return 0;
4540 unsetup:
4541 be_clear(adapter);
4542 stats_clean:
4543 be_stats_cleanup(adapter);
4544 ctrl_clean:
4545 be_ctrl_cleanup(adapter);
4546 free_netdev:
4547 free_netdev(netdev);
4548 rel_reg:
4549 pci_release_regions(pdev);
4550 disable_dev:
4551 pci_disable_device(pdev);
4552 do_none:
4553 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4554 return status;
4555 }
4557 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4558 {
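/* Legacy PM callback (struct pci_driver .suspend). WoL is armed first so
 * firmware keeps the port alive for wakeup packets while the host sleeps;
 * be_resume() disarms it again on the way back up.
 */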
4559 struct be_adapter *adapter = pci_get_drvdata(pdev);
4560 struct net_device *netdev = adapter->netdev;
4562 if (adapter->wol_en)
4563 be_setup_wol(adapter, true);
4565 be_intr_set(adapter, false);
4566 cancel_delayed_work_sync(&adapter->func_recovery_work);
4568 netif_device_detach(netdev);
4569 if (netif_running(netdev)) {
4570 rtnl_lock();
4571 be_close(netdev);
4572 rtnl_unlock();
4573 }
4574 be_clear(adapter);
4576 pci_save_state(pdev);
4577 pci_disable_device(pdev);
4578 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4579 return 0;
4580 }
4582 static int be_resume(struct pci_dev *pdev)
4583 {
4584 int status = 0;
4585 struct be_adapter *adapter = pci_get_drvdata(pdev);
4586 struct net_device *netdev = adapter->netdev;
4588 netif_device_detach(netdev);
4590 status = pci_enable_device(pdev);
4591 if (status)
4592 return status;
4594 pci_set_power_state(pdev, PCI_D0);
4595 pci_restore_state(pdev);
4597 status = be_fw_wait_ready(adapter);
4598 if (status)
4599 return status;
4601 be_intr_set(adapter, true);
4602 /* tell fw we're ready to fire cmds */
4603 status = be_cmd_fw_init(adapter);
4604 if (status)
4605 return status;
4607 be_setup(adapter);
4608 if (netif_running(netdev)) {
4609 rtnl_lock();
4610 be_open(netdev);
4611 rtnl_unlock();
4612 }
4614 schedule_delayed_work(&adapter->func_recovery_work,
4615 msecs_to_jiffies(1000));
4616 netif_device_attach(netdev);
4618 if (adapter->wol_en)
4619 be_setup_wol(adapter, false);
4621 return 0;
4622 }
4624 /*
4625 * An FLR will stop BE from DMAing any data.
4626 */
4627 static void be_shutdown(struct pci_dev *pdev)
4628 {
4629 struct be_adapter *adapter = pci_get_drvdata(pdev);
4631 if (!adapter)
4632 return;
4634 cancel_delayed_work_sync(&adapter->work);
4635 cancel_delayed_work_sync(&adapter->func_recovery_work);
4637 netif_device_detach(adapter->netdev);
4639 be_cmd_reset_function(adapter);
4641 pci_disable_device(pdev);
4642 }
4644 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4645 pci_channel_state_t state)
4646 {
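/* AER/EEH recovery flow: error_detected -> slot_reset -> resume.
 * Returning PCI_ERS_RESULT_NEED_RESET asks the PCI core to reset the
 * slot and then call be_eeh_reset(); PCI_ERS_RESULT_DISCONNECT gives
 * the device up as unrecoverable.
 */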
4647 struct be_adapter *adapter = pci_get_drvdata(pdev);
4648 struct net_device *netdev = adapter->netdev;
4650 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4652 if (!adapter->eeh_error) {
4653 adapter->eeh_error = true;
4655 cancel_delayed_work_sync(&adapter->func_recovery_work);
4657 rtnl_lock();
4658 netif_device_detach(netdev);
4659 if (netif_running(netdev))
4660 be_close(netdev);
4661 rtnl_unlock();
4663 be_clear(adapter);
4664 }
4666 if (state == pci_channel_io_perm_failure)
4667 return PCI_ERS_RESULT_DISCONNECT;
4669 pci_disable_device(pdev);
4671 /* The error could cause the FW to trigger a flash debug dump.
4672 * Resetting the card while flash dump is in progress
4673 * can cause it not to recover; wait for it to finish.
4674 * Wait only for first function as it is needed only once per
4675 * adapter.
4676 */
4677 if (pdev->devfn == 0)
4678 ssleep(30);
4680 return PCI_ERS_RESULT_NEED_RESET;
4681 }
4683 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4684 {
4685 struct be_adapter *adapter = pci_get_drvdata(pdev);
4686 int status;
4688 dev_info(&adapter->pdev->dev, "EEH reset\n");
4690 status = pci_enable_device(pdev);
4691 if (status)
4692 return PCI_ERS_RESULT_DISCONNECT;
4694 pci_set_master(pdev);
4695 pci_set_power_state(pdev, PCI_D0);
4696 pci_restore_state(pdev);
4698 /* Check if card is ok and fw is ready */
4699 dev_info(&adapter->pdev->dev,
4700 "Waiting for FW to be ready after EEH reset\n");
4701 status = be_fw_wait_ready(adapter);
4702 if (status)
4703 return PCI_ERS_RESULT_DISCONNECT;
4705 pci_cleanup_aer_uncorrect_error_status(pdev);
4706 be_clear_all_error(adapter);
4707 return PCI_ERS_RESULT_RECOVERED;
4708 }
4710 static void be_eeh_resume(struct pci_dev *pdev)
4711 {
4712 int status = 0;
4713 struct be_adapter *adapter = pci_get_drvdata(pdev);
4714 struct net_device *netdev = adapter->netdev;
4716 dev_info(&adapter->pdev->dev, "EEH resume\n");
4718 pci_save_state(pdev);
4720 status = be_cmd_reset_function(adapter);
4721 if (status)
4722 goto err;
4724 /* tell fw we're ready to fire cmds */
4725 status = be_cmd_fw_init(adapter);
4726 if (status)
4727 goto err;
4729 status = be_setup(adapter);
4730 if (status)
4731 goto err;
4733 if (netif_running(netdev)) {
4734 status = be_open(netdev);
4735 if (status)
4736 goto err;
4737 }
4739 schedule_delayed_work(&adapter->func_recovery_work,
4740 msecs_to_jiffies(1000));
4741 netif_device_attach(netdev);
4742 return;
4743 err:
4744 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4745 }
4747 static const struct pci_error_handlers be_eeh_handlers = {
4748 .error_detected = be_eeh_err_detected,
4749 .slot_reset = be_eeh_reset,
4750 .resume = be_eeh_resume,
4751 };
4753 static struct pci_driver be_driver = {
4754 .name = DRV_NAME,
4755 .id_table = be_dev_ids,
4756 .probe = be_probe,
4757 .remove = be_remove,
4758 .suspend = be_suspend,
4759 .resume = be_resume,
4760 .shutdown = be_shutdown,
4761 .err_handler = &be_eeh_handlers
4762 };
4764 static int __init be_init_module(void)
4765 {
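/* The RX rings post fixed-size buffer fragments, so only 2048, 4096 and
 * 8192 byte fragments are supported; any other module parameter value is
 * forced back to the 2048 default below.
 */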
4766 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4767 rx_frag_size != 2048) {
4768 printk(KERN_WARNING DRV_NAME
4769 " : Module param rx_frag_size must be 2048/4096/8192."
4770 " Using 2048\n");
4771 rx_frag_size = 2048;
4772 }
4774 return pci_register_driver(&be_driver);
4775 }
4776 module_init(be_init_module);
4778 static void __exit be_exit_module(void)
4779 {
4780 pci_unregister_driver(&be_driver);
4781 }
4782 module_exit(be_exit_module);