/* Source: Linux 4.19.133
 * drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
 * (blob 00f9ed93360c6707860c5aa3538d518f45224f59)
 */
/* bnx2x_ethtool.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/ethtool.h>
23 #include <linux/netdevice.h>
24 #include <linux/types.h>
25 #include <linux/sched.h>
26 #include <linux/crc32.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_dump.h"
30 #include "bnx2x_init.h"
/* Note: in the format strings below %s is replaced by the queue-name which is
 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
 * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
 */
#define MAX_QUEUE_NAME_LEN	4
/* Per-queue ethtool statistics: offset of the counter inside the queue
 * stats structure, its size in bytes (8 for the _hi/_lo 64-bit pairs,
 * 4 for plain 32-bit counters), and the ethtool string template.
 */
static const struct {
	long offset;
	int size;
	char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%s]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%s]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%s]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%s]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					 4, "[%s]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					 4, "[%s]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
	{ Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" },
	{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%s]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%s]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
						8, "[%s]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
						8, "[%s]: tx_bcast_packets" },
	{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
						8, "[%s]: tpa_aggregations" },
	{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
					8, "[%s]: tpa_aggregated_frames"},
	{ Q_STATS_OFFSET32(total_tpa_bytes_hi),	8, "[%s]: tpa_bytes"},
	{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
					4, "[%s]: driver_filtered_tx_pkt" }
};

#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
/* Global (per-function / per-port) ethtool statistics.  is_port_stat
 * marks counters maintained per physical port (MAC/BRB/NIG level) as
 * opposed to per-function counters.
 */
static const struct {
	long offset;
	int size;
	bool is_port_stat;
	char string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, false, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, false, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, false, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, false, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, false, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, true, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, true, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, true, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, true, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, true, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, true, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, false, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, true, "rx_filtered_packets" },
	{ STATS_OFFSET32(mf_tag_discard),
				4, true, "rx_mf_tag_discard" },
	{ STATS_OFFSET32(pfc_frames_received_hi),
				8, true, "pfc_frames_received" },
	{ STATS_OFFSET32(pfc_frames_sent_hi),
				8, true, "pfc_frames_sent" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, true, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, true, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, true, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, true, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
				4, true, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, false, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, false, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, false, "rx_csum_offload_errors" },
	{ STATS_OFFSET32(driver_xoff),
				4, false, "tx_exhaustion_events" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, false, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, true, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, false, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, false, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, false, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, true, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, true, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, true, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, true, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, true, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, true, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, true, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, true, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, true, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
				8, true, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
				8, true, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
				8, true, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
				8, true, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
				8, true, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
				8, true, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, true, "tx_pause_frames" },
	{ STATS_OFFSET32(total_tpa_aggregations_hi),
				8, false, "tpa_aggregations" },
	{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
				8, false, "tpa_aggregated_frames"},
	{ STATS_OFFSET32(total_tpa_bytes_hi),
				8, false, "tpa_bytes"},
	{ STATS_OFFSET32(recoverable_error),
				4, false, "recoverable_errors" },
	{ STATS_OFFSET32(unrecoverable_error),
				4, false, "unrecoverable_errors" },
	{ STATS_OFFSET32(driver_filtered_tx_pkt),
				4, false, "driver_filtered_tx_pkt" },
	{ STATS_OFFSET32(eee_tx_lpi),
				4, true, "Tx LPI entry count"},
	{ STATS_OFFSET32(ptp_skip_tx_ts),
				4, false, "ptp_skipped_tx_tstamp" },
};

#define BNX2X_NUM_STATS		ARRAY_SIZE(bnx2x_stats_arr)
192 static int bnx2x_get_port_type(struct bnx2x *bp)
194 int port_type;
195 u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
196 switch (bp->link_params.phy[phy_idx].media_type) {
197 case ETH_PHY_SFPP_10G_FIBER:
198 case ETH_PHY_SFP_1G_FIBER:
199 case ETH_PHY_XFP_FIBER:
200 case ETH_PHY_KR:
201 case ETH_PHY_CX4:
202 port_type = PORT_FIBRE;
203 break;
204 case ETH_PHY_DA_TWINAX:
205 port_type = PORT_DA;
206 break;
207 case ETH_PHY_BASE_T:
208 port_type = PORT_TP;
209 break;
210 case ETH_PHY_NOT_PRESENT:
211 port_type = PORT_NONE;
212 break;
213 case ETH_PHY_UNSPECIFIED:
214 default:
215 port_type = PORT_OTHER;
216 break;
218 return port_type;
/* Report link settings for a VF.  Speed/duplex come from the link state
 * last reported to the VF (vf_link_vars); port, PHY address and autoneg
 * are fixed placeholders since the VF has no access to the physical PHY.
 */
static int bnx2x_get_vf_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (bp->state == BNX2X_STATE_OPEN) {
		if (test_bit(BNX2X_LINK_REPORT_FD,
			     &bp->vf_link_vars.link_report_flags))
			cmd->base.duplex = DUPLEX_FULL;
		else
			cmd->base.duplex = DUPLEX_HALF;

		cmd->base.speed = bp->vf_link_vars.line_speed;
	} else {
		/* Interface is down - nothing meaningful to report */
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
	}

	cmd->base.port = PORT_OTHER;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_DISABLE;

	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
	   "  supported 0x%x  advertising 0x%x  speed %u\n"
	   "  duplex %d  port %d  phy_address %d\n"
	   "  autoneg %d\n",
	   cmd->base.cmd, supported, advertising,
	   cmd->base.speed,
	   cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
	   cmd->base.autoneg);

	return 0;
}
/* ethtool get_link_ksettings handler for a PF.  Builds the supported and
 * advertised mode masks from the active link configuration, reports the
 * negotiated speed/duplex when the link is up, and decodes the link
 * partner's advertised capabilities from the link status word.
 */
static int bnx2x_get_link_ksettings(struct net_device *dev,
				    struct ethtool_link_ksettings *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	int cfg_idx = bnx2x_get_link_cfg_idx(bp);
	u32 media_type;
	u32 supported, advertising, lp_advertising;

	ethtool_convert_link_mode_to_legacy_u32(&lp_advertising,
						cmd->link_modes.lp_advertising);

	/* Dual Media boards present all available port types */
	supported = bp->port.supported[cfg_idx] |
		    (bp->port.supported[cfg_idx ^ 1] &
		     (SUPPORTED_TP | SUPPORTED_FIBRE));
	advertising = bp->port.advertising[cfg_idx];
	media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type;
	if (media_type == ETH_PHY_SFP_1G_FIBER) {
		/* A 1G SFP module cannot do 10G - mask it out */
		supported &= ~(SUPPORTED_10000baseT_Full);
		advertising &= ~(ADVERTISED_10000baseT_Full);
	}

	if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
	    !(bp->flags & MF_FUNC_DIS)) {
		cmd->base.duplex = bp->link_vars.duplex;

		/* In multi-function mode the effective speed may be a
		 * fraction of the line speed - ask the MF helper.
		 */
		if (IS_MF(bp) && !BP_NOMCP(bp))
			cmd->base.speed = bnx2x_get_mf_speed(bp);
		else
			cmd->base.speed = bp->link_vars.line_speed;
	} else {
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
	}

	cmd->base.port = bnx2x_get_port_type(bp);

	cmd->base.phy_address = bp->mdio.prtad;

	if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Publish LP advertised speeds and FC */
	if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
		u32 status = bp->link_vars.link_status;

		lp_advertising |= ADVERTISED_Autoneg;
		if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
			lp_advertising |= ADVERTISED_Pause;
		if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
			lp_advertising |= ADVERTISED_Asym_Pause;

		if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
			lp_advertising |= ADVERTISED_10baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
			lp_advertising |= ADVERTISED_10baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
			lp_advertising |= ADVERTISED_100baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
			lp_advertising |= ADVERTISED_100baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
			lp_advertising |= ADVERTISED_1000baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) {
			/* Backplane (KR) media reports the KX mode bit */
			if (media_type == ETH_PHY_KR) {
				lp_advertising |=
					ADVERTISED_1000baseKX_Full;
			} else {
				lp_advertising |=
					ADVERTISED_1000baseT_Full;
			}
		}
		if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
			lp_advertising |= ADVERTISED_2500baseX_Full;
		if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) {
			if (media_type == ETH_PHY_KR) {
				lp_advertising |=
					ADVERTISED_10000baseKR_Full;
			} else {
				lp_advertising |=
					ADVERTISED_10000baseT_Full;
			}
		}
		if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
			lp_advertising |= ADVERTISED_20000baseKR2_Full;
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
	   "  supported 0x%x  advertising 0x%x  speed %u\n"
	   "  duplex %d  port %d  phy_address %d\n"
	   "  autoneg %d\n",
	   cmd->base.cmd, supported, advertising,
	   cmd->base.speed,
	   cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
	   cmd->base.autoneg);

	return 0;
}
/* ethtool set_link_ksettings handler.  Validates the requested port type,
 * speed, duplex and autoneg mode against what the board supports, updates
 * link_params / port.advertising accordingly, and restarts the link if the
 * interface is running.  In MF_SI (switch-independent multi-function) mode
 * the requested speed is reinterpreted as a bandwidth share (percentage of
 * the actual line speed) rather than a PHY speed.
 */
static int bnx2x_set_link_ksettings(struct net_device *dev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
	u32 speed, phy_idx;
	u32 supported;
	u8 duplex = cmd->base.duplex;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* In switch-dependent MF mode the link is owned by the switch */
	if (IS_MF_SD(bp))
		return 0;

	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
	   "  supported 0x%x  advertising 0x%x  speed %u\n"
	   "  duplex %d  port %d  phy_address %d\n"
	   "  autoneg %d\n",
	   cmd->base.cmd, supported, advertising,
	   cmd->base.speed,
	   cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
	   cmd->base.autoneg);

	speed = cmd->base.speed;

	/* If received a request for an unknown duplex, assume full*/
	if (duplex == DUPLEX_UNKNOWN)
		duplex = DUPLEX_FULL;

	if (IS_MF_SI(bp)) {
		u32 part;
		u32 line_speed = bp->link_vars.line_speed;

		/* use 10G if no link detected */
		if (!line_speed)
			line_speed = 10000;

		if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
			DP(BNX2X_MSG_ETHTOOL,
			   "To set speed BC %X or higher is required, please upgrade BC\n",
			   REQ_BC_VER_4_SET_MF_BW);
			return -EINVAL;
		}

		/* Requested speed expressed as a percentage of line speed */
		part = (speed * 100) / line_speed;

		if (line_speed < speed || !part) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Speed setting should be in a range from 1%% to 100%% of actual line speed\n");
			return -EINVAL;
		}

		if (bp->state != BNX2X_STATE_OPEN)
			/* store value for following "load" */
			bp->pending_max = part;
		else
			bnx2x_update_max_mf_config(bp, part);

		return 0;
	}

	cfg_idx = bnx2x_get_link_cfg_idx(bp);
	old_multi_phy_config = bp->link_params.multi_phy_config;
	if (cmd->base.port != bnx2x_get_port_type(bp)) {
		/* Port type change on a dual-media board: select the PHY
		 * matching the requested port, honoring PHY swapping.
		 */
		switch (cmd->base.port) {
		case PORT_TP:
			if (!(bp->port.supported[0] & SUPPORTED_TP ||
			      bp->port.supported[1] & SUPPORTED_TP)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "Unsupported port type\n");
				return -EINVAL;
			}
			bp->link_params.multi_phy_config &=
				~PORT_HW_CFG_PHY_SELECTION_MASK;
			if (bp->link_params.multi_phy_config &
			    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
				bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
			else
				bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
			break;
		case PORT_FIBRE:
		case PORT_DA:
		case PORT_NONE:
			if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
			      bp->port.supported[1] & SUPPORTED_FIBRE)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "Unsupported port type\n");
				return -EINVAL;
			}
			bp->link_params.multi_phy_config &=
				~PORT_HW_CFG_PHY_SELECTION_MASK;
			if (bp->link_params.multi_phy_config &
			    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
				bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
			else
				bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
			break;
		default:
			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
			return -EINVAL;
		}
	}
	/* Save new config in case command complete successfully */
	new_multi_phy_config = bp->link_params.multi_phy_config;
	/* Get the new cfg_idx */
	cfg_idx = bnx2x_get_link_cfg_idx(bp);
	/* Restore old config in case command failed */
	bp->link_params.multi_phy_config = old_multi_phy_config;
	DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		u32 an_supported_speed = bp->port.supported[cfg_idx];

		/* BCM84833 can autonegotiate 100M even though the board
		 * capability mask does not list it.
		 */
		if (bp->link_params.phy[EXT_PHY1].type ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
			an_supported_speed |= (SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full);
		if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
			DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		if (advertising & ~an_supported_speed) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Advertisement parameters are not supported\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
		bp->link_params.req_duplex[cfg_idx] = duplex;
		bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
						 advertising);
		if (advertising) {
			/* Translate the advertised modes into the HW
			 * speed-capability mask used by the link code.
			 */
			bp->link_params.speed_cap_mask[cfg_idx] = 0;
			if (advertising & ADVERTISED_10baseT_Half) {
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
			}
			if (advertising & ADVERTISED_10baseT_Full)
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;

			if (advertising & ADVERTISED_100baseT_Full)
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;

			if (advertising & ADVERTISED_100baseT_Half) {
				bp->link_params.speed_cap_mask[cfg_idx] |=
				     PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
			}
			if (advertising & ADVERTISED_1000baseT_Half) {
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
			}
			if (advertising & (ADVERTISED_1000baseT_Full |
					   ADVERTISED_1000baseKX_Full))
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;

			if (advertising & (ADVERTISED_10000baseT_Full |
					   ADVERTISED_10000baseKX4_Full |
					   ADVERTISED_10000baseKR_Full))
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;

			if (advertising & ADVERTISED_20000baseKR2_Full)
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
		}
	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (speed) {
		case SPEED_10:
			if (duplex == DUPLEX_FULL) {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_10baseT_Full)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_10baseT_Half)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (duplex == DUPLEX_FULL) {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_100baseT_Full)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_100baseT_Half)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (duplex != DUPLEX_FULL) {
				DP(BNX2X_MSG_ETHTOOL,
				   "1G half not supported\n");
				return -EINVAL;
			}

			if (bp->port.supported[cfg_idx] &
			    SUPPORTED_1000baseT_Full) {
				advertising = (ADVERTISED_1000baseT_Full |
					       ADVERTISED_TP);

			} else if (bp->port.supported[cfg_idx] &
				   SUPPORTED_1000baseKX_Full) {
				advertising = ADVERTISED_1000baseKX_Full;
			} else {
				DP(BNX2X_MSG_ETHTOOL,
				   "1G full not supported\n");
				return -EINVAL;
			}

			break;

		case SPEED_2500:
			if (duplex != DUPLEX_FULL) {
				DP(BNX2X_MSG_ETHTOOL,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported[cfg_idx]
			      & SUPPORTED_2500baseX_Full)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (duplex != DUPLEX_FULL) {
				DP(BNX2X_MSG_ETHTOOL,
				   "10G half not supported\n");
				return -EINVAL;
			}
			phy_idx = bnx2x_get_cur_phy_idx(bp);
			/* 10GBASE-T is not possible through a 1G SFP module */
			if ((bp->port.supported[cfg_idx] &
			     SUPPORTED_10000baseT_Full) &&
			    (bp->link_params.phy[phy_idx].media_type !=
			     ETH_PHY_SFP_1G_FIBER)) {
				advertising = (ADVERTISED_10000baseT_Full |
					       ADVERTISED_FIBRE);
			} else if (bp->port.supported[cfg_idx] &
			       SUPPORTED_10000baseKR_Full) {
				advertising = (ADVERTISED_10000baseKR_Full |
					       ADVERTISED_FIBRE);
			} else {
				DP(BNX2X_MSG_ETHTOOL,
				   "10G full not supported\n");
				return -EINVAL;
			}

			break;

		default:
			DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed);
			return -EINVAL;
		}

		bp->link_params.req_line_speed[cfg_idx] = speed;
		bp->link_params.req_duplex[cfg_idx] = duplex;
		bp->port.advertising[cfg_idx] = advertising;
	}

	DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n"
	   "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed[cfg_idx],
	   bp->link_params.req_duplex[cfg_idx],
	   bp->port.advertising[cfg_idx]);

	/* Set new config */
	bp->link_params.multi_phy_config = new_multi_phy_config;
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_force_link_reset(bp);
		bnx2x_link_set(bp);
	}

	return 0;
}
/* Register-dump presets: bitmask selecting all 13 presets, and the
 * highest 1-based preset index.
 */
#define DUMP_ALL_PRESETS		0x1FFF
#define DUMP_MAX_PRESETS		13
692 static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
694 if (CHIP_IS_E1(bp))
695 return dump_num_registers[0][preset-1];
696 else if (CHIP_IS_E1H(bp))
697 return dump_num_registers[1][preset-1];
698 else if (CHIP_IS_E2(bp))
699 return dump_num_registers[2][preset-1];
700 else if (CHIP_IS_E3A0(bp))
701 return dump_num_registers[3][preset-1];
702 else if (CHIP_IS_E3B0(bp))
703 return dump_num_registers[4][preset-1];
704 else
705 return 0;
708 static int __bnx2x_get_regs_len(struct bnx2x *bp)
710 u32 preset_idx;
711 int regdump_len = 0;
713 /* Calculate the total preset regs length */
714 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
715 regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);
717 return regdump_len;
720 static int bnx2x_get_regs_len(struct net_device *dev)
722 struct bnx2x *bp = netdev_priv(dev);
723 int regdump_len = 0;
725 if (IS_VF(bp))
726 return 0;
728 regdump_len = __bnx2x_get_regs_len(bp);
729 regdump_len *= 4;
730 regdump_len += sizeof(struct dump_header);
732 return regdump_len;
/* Chip-membership tests for a dump table entry: true when the entry's
 * 'chips' bitmap includes the given chip family.
 */
#define IS_E1_REG(chips)	((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
#define IS_E1H_REG(chips)	((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
#define IS_E2_REG(chips)	((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
#define IS_E3A0_REG(chips)	((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
#define IS_E3B0_REG(chips)	((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)

/* True when 1-based preset @idx is selected in the @presets bitmap. */
#define IS_REG_IN_PRESET(presets, idx)  \
		((presets & (1 << (idx-1))) == (1 << (idx-1)))
744 /******* Paged registers info selectors ********/
745 static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
747 if (CHIP_IS_E2(bp))
748 return page_vals_e2;
749 else if (CHIP_IS_E3(bp))
750 return page_vals_e3;
751 else
752 return NULL;
755 static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
757 if (CHIP_IS_E2(bp))
758 return PAGE_MODE_VALUES_E2;
759 else if (CHIP_IS_E3(bp))
760 return PAGE_MODE_VALUES_E3;
761 else
762 return 0;
765 static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
767 if (CHIP_IS_E2(bp))
768 return page_write_regs_e2;
769 else if (CHIP_IS_E3(bp))
770 return page_write_regs_e3;
771 else
772 return NULL;
775 static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
777 if (CHIP_IS_E2(bp))
778 return PAGE_WRITE_REGS_E2;
779 else if (CHIP_IS_E3(bp))
780 return PAGE_WRITE_REGS_E3;
781 else
782 return 0;
785 static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
787 if (CHIP_IS_E2(bp))
788 return page_read_regs_e2;
789 else if (CHIP_IS_E3(bp))
790 return page_read_regs_e3;
791 else
792 return NULL;
795 static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
797 if (CHIP_IS_E2(bp))
798 return PAGE_READ_REGS_E2;
799 else if (CHIP_IS_E3(bp))
800 return PAGE_READ_REGS_E3;
801 else
802 return 0;
805 static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
806 const struct reg_addr *reg_info)
808 if (CHIP_IS_E1(bp))
809 return IS_E1_REG(reg_info->chips);
810 else if (CHIP_IS_E1H(bp))
811 return IS_E1H_REG(reg_info->chips);
812 else if (CHIP_IS_E2(bp))
813 return IS_E2_REG(reg_info->chips);
814 else if (CHIP_IS_E3A0(bp))
815 return IS_E3A0_REG(reg_info->chips);
816 else if (CHIP_IS_E3B0(bp))
817 return IS_E3B0_REG(reg_info->chips);
818 else
819 return false;
822 static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
823 const struct wreg_addr *wreg_info)
825 if (CHIP_IS_E1(bp))
826 return IS_E1_REG(wreg_info->chips);
827 else if (CHIP_IS_E1H(bp))
828 return IS_E1H_REG(wreg_info->chips);
829 else if (CHIP_IS_E2(bp))
830 return IS_E2_REG(wreg_info->chips);
831 else if (CHIP_IS_E3A0(bp))
832 return IS_E3A0_REG(wreg_info->chips);
833 else if (CHIP_IS_E3B0(bp))
834 return IS_E3B0_REG(wreg_info->chips);
835 else
836 return false;
/**
 * bnx2x_read_pages_regs - read "paged" registers
 *
 * @bp		device handle
 * @p		output buffer
 * @preset	1-based dump preset selecting which read addresses to emit
 *
 * Reads "paged" memories: memories that may only be read by first writing to a
 * specific address ("write address") and then reading from a specific address
 * ("read address"). There may be more than one write address per "page" and
 * more than one read address per write address.
 */
static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
	u32 i, j, k, n;

	/* addresses of the paged registers */
	const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
	/* number of paged registers */
	int num_pages = __bnx2x_get_page_reg_num(bp);
	/* write addresses */
	const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
	/* number of write addresses */
	int write_num = __bnx2x_get_page_write_num(bp);
	/* read addresses info */
	const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
	/* number of read addresses */
	int read_num = __bnx2x_get_page_read_num(bp);
	u32 addr, size;

	for (i = 0; i < num_pages; i++) {
		for (j = 0; j < write_num; j++) {
			/* Select the page, then read every preset-matching
			 * register range from it.
			 */
			REG_WR(bp, write_addr[j], page_addr[i]);

			for (k = 0; k < read_num; k++) {
				if (IS_REG_IN_PRESET(read_addr[k].presets,
						     preset)) {
					size = read_addr[k].size;
					for (n = 0; n < size; n++) {
						addr = read_addr[k].addr + n*4;
						*p++ = REG_RD(bp, addr);
					}
				}
			}
		}
	}
}
/* Emit the registers belonging to one dump preset into @p: idle-check
 * registers, regular registers, the chip's CAM/wreg block, and (on E2/E3)
 * the paged registers.  The caller advances @p by the preset's length.
 */
static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
	u32 i, j, addr;
	const struct wreg_addr *wreg_addr_p = NULL;

	/* Select the per-chip CAM/wreg descriptor; stays NULL for an
	 * unknown chip, in which case bnx2x_is_wreg_in_chip() below
	 * returns false without dereferencing it.
	 */
	if (CHIP_IS_E1(bp))
		wreg_addr_p = &wreg_addr_e1;
	else if (CHIP_IS_E1H(bp))
		wreg_addr_p = &wreg_addr_e1h;
	else if (CHIP_IS_E2(bp))
		wreg_addr_p = &wreg_addr_e2;
	else if (CHIP_IS_E3A0(bp))
		wreg_addr_p = &wreg_addr_e3;
	else if (CHIP_IS_E3B0(bp))
		wreg_addr_p = &wreg_addr_e3b0;

	/* Read the idle_chk registers */
	for (i = 0; i < IDLE_REGS_COUNT; i++) {
		if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
		    IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
			for (j = 0; j < idle_reg_addrs[i].size; j++)
				*p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
		}
	}

	/* Read the regular registers */
	for (i = 0; i < REGS_COUNT; i++) {
		if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
		    IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
			for (j = 0; j < reg_addrs[i].size; j++)
				*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
		}
	}

	/* Read the CAM registers */
	if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
	    IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
		for (i = 0; i < wreg_addr_p->size; i++) {
			*p++ = REG_RD(bp, wreg_addr_p->addr + i*4);

			/* In case of wreg_addr register, read additional
			   registers from read_regs array
			*/
			for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
				addr = *(wreg_addr_p->read_regs);
				*p++ = REG_RD(bp, addr + j*4);
			}
		}
	}

	/* Paged registers are supported in E2 & E3 only */
	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
		/* Read "paged" registers */
		bnx2x_read_pages_regs(bp, p, preset);
	}

	return 0;
}
945 static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
947 u32 preset_idx;
949 /* Read all registers, by reading all preset registers */
950 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
951 /* Skip presets with IOR */
952 if ((preset_idx == 2) ||
953 (preset_idx == 5) ||
954 (preset_idx == 8) ||
955 (preset_idx == 11))
956 continue;
957 __bnx2x_get_preset_regs(bp, p, preset_idx);
958 p += __bnx2x_get_preset_regs_len(bp, preset_idx);
/* ethtool get_regs handler: writes a dump_header describing the chip and
 * path, then the full register dump.  Parity attentions are disabled for
 * the duration since the dump reads never-written registers.
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_header dump_hdr = {0};

	regs->version = 2;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	/* Disable parity attentions as long as following dump may
	 * cause false alarms by reading never written registers. We
	 * will re-enable parity attentions right after the dump.
	 */

	bnx2x_disable_blocks_parity(bp);

	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
	dump_hdr.preset = DUMP_ALL_PRESETS;
	dump_hdr.version = BNX2X_DUMP_VERSION;

	/* dump_meta_data presents OR of CHIP and PATH. */
	if (CHIP_IS_E1(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1;
	} else if (CHIP_IS_E1H(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
	} else if (CHIP_IS_E2(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3A0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3B0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	}

	memcpy(p, &dump_hdr, sizeof(struct dump_header));
	p += dump_hdr.header_size + 1;

	/* This isn't really an error, but since attention handling is going
	 * to print the GRC timeouts using this macro, we use the same.
	 */
	BNX2X_ERR("Generating register dump. Might trigger harmless GRC timeouts\n");

	/* Actually read the registers */
	__bnx2x_get_regs(bp, p);

	/* Re-enable parity attentions */
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);
}
1018 static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
1020 struct bnx2x *bp = netdev_priv(dev);
1021 int regdump_len = 0;
1023 regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
1024 regdump_len *= 4;
1025 regdump_len += sizeof(struct dump_header);
1027 return regdump_len;
1030 static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
1032 struct bnx2x *bp = netdev_priv(dev);
1034 /* Use the ethtool_dump "flag" field as the dump preset index */
1035 if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
1036 return -EINVAL;
1038 bp->dump_preset_idx = val->flag;
1039 return 0;
1042 static int bnx2x_get_dump_flag(struct net_device *dev,
1043 struct ethtool_dump *dump)
1045 struct bnx2x *bp = netdev_priv(dev);
1047 dump->version = BNX2X_DUMP_VERSION;
1048 dump->flag = bp->dump_preset_idx;
1049 /* Calculate the requested preset idx length */
1050 dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
1051 DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
1052 bp->dump_preset_idx, dump->len);
1053 return 0;
1056 static int bnx2x_get_dump_data(struct net_device *dev,
1057 struct ethtool_dump *dump,
1058 void *buffer)
1060 u32 *p = buffer;
1061 struct bnx2x *bp = netdev_priv(dev);
1062 struct dump_header dump_hdr = {0};
1064 /* Disable parity attentions as long as following dump may
1065 * cause false alarms by reading never written registers. We
1066 * will re-enable parity attentions right after the dump.
1069 bnx2x_disable_blocks_parity(bp);
1071 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
1072 dump_hdr.preset = bp->dump_preset_idx;
1073 dump_hdr.version = BNX2X_DUMP_VERSION;
1075 DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);
1077 /* dump_meta_data presents OR of CHIP and PATH. */
1078 if (CHIP_IS_E1(bp)) {
1079 dump_hdr.dump_meta_data = DUMP_CHIP_E1;
1080 } else if (CHIP_IS_E1H(bp)) {
1081 dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
1082 } else if (CHIP_IS_E2(bp)) {
1083 dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
1084 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
1085 } else if (CHIP_IS_E3A0(bp)) {
1086 dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
1087 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
1088 } else if (CHIP_IS_E3B0(bp)) {
1089 dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
1090 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
1093 memcpy(p, &dump_hdr, sizeof(struct dump_header));
1094 p += dump_hdr.header_size + 1;
1096 /* Actually read the registers */
1097 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
1099 /* Re-enable parity attentions */
1100 bnx2x_clear_blocks_parity(bp);
1101 bnx2x_enable_blocks_parity(bp);
1103 return 0;
1106 static void bnx2x_get_drvinfo(struct net_device *dev,
1107 struct ethtool_drvinfo *info)
1109 struct bnx2x *bp = netdev_priv(dev);
1111 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1112 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1114 bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
1116 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
1119 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1121 struct bnx2x *bp = netdev_priv(dev);
1123 if (bp->flags & NO_WOL_FLAG) {
1124 wol->supported = 0;
1125 wol->wolopts = 0;
1126 } else {
1127 wol->supported = WAKE_MAGIC;
1128 if (bp->wol)
1129 wol->wolopts = WAKE_MAGIC;
1130 else
1131 wol->wolopts = 0;
1133 memset(&wol->sopass, 0, sizeof(wol->sopass));
1136 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1138 struct bnx2x *bp = netdev_priv(dev);
1140 if (wol->wolopts & ~WAKE_MAGIC) {
1141 DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
1142 return -EINVAL;
1145 if (wol->wolopts & WAKE_MAGIC) {
1146 if (bp->flags & NO_WOL_FLAG) {
1147 DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
1148 return -EINVAL;
1150 bp->wol = 1;
1151 } else
1152 bp->wol = 0;
1154 if (SHMEM2_HAS(bp, curr_cfg))
1155 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
1157 return 0;
1160 static u32 bnx2x_get_msglevel(struct net_device *dev)
1162 struct bnx2x *bp = netdev_priv(dev);
1164 return bp->msg_enable;
1167 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
1169 struct bnx2x *bp = netdev_priv(dev);
1171 if (capable(CAP_NET_ADMIN)) {
1172 /* dump MCP trace */
1173 if (IS_PF(bp) && (level & BNX2X_MSG_MCP))
1174 bnx2x_fw_dump_lvl(bp, KERN_INFO);
1175 bp->msg_enable = level;
1179 static int bnx2x_nway_reset(struct net_device *dev)
1181 struct bnx2x *bp = netdev_priv(dev);
1183 if (!bp->port.pmf)
1184 return 0;
1186 if (netif_running(dev)) {
1187 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1188 bnx2x_force_link_reset(bp);
1189 bnx2x_link_set(bp);
1192 return 0;
1195 static u32 bnx2x_get_link(struct net_device *dev)
1197 struct bnx2x *bp = netdev_priv(dev);
1199 if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
1200 return 0;
1202 if (IS_VF(bp))
1203 return !test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1204 &bp->vf_link_vars.link_report_flags);
1206 return bp->link_vars.link_up;
1209 static int bnx2x_get_eeprom_len(struct net_device *dev)
1211 struct bnx2x *bp = netdev_priv(dev);
1213 return bp->common.flash_size;
1216 /* Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1217 * had we done things the other way around, if two pfs from the same port would
1218 * attempt to access nvram at the same time, we could run into a scenario such
1219 * as:
1220 * pf A takes the port lock.
1221 * pf B succeeds in taking the same lock since they are from the same port.
1222 * pf A takes the per pf misc lock. Performs eeprom access.
1223 * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform its own access.
1225 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B)
1229 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
1231 int port = BP_PORT(bp);
1232 int count, i;
1233 u32 val;
1235 /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1236 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
1238 /* adjust timeout for emulation/FPGA */
1239 count = BNX2X_NVRAM_TIMEOUT_COUNT;
1240 if (CHIP_REV_IS_SLOW(bp))
1241 count *= 100;
1243 /* request access to nvram interface */
1244 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
1245 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1247 for (i = 0; i < count*10; i++) {
1248 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
1249 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
1250 break;
1252 udelay(5);
1255 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1256 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1257 "cannot get access to nvram interface\n");
1258 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
1259 return -EBUSY;
1262 return 0;
1265 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
1267 int port = BP_PORT(bp);
1268 int count, i;
1269 u32 val;
1271 /* adjust timeout for emulation/FPGA */
1272 count = BNX2X_NVRAM_TIMEOUT_COUNT;
1273 if (CHIP_REV_IS_SLOW(bp))
1274 count *= 100;
1276 /* relinquish nvram interface */
1277 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
1278 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1280 for (i = 0; i < count*10; i++) {
1281 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
1282 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
1283 break;
1285 udelay(5);
1288 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1289 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1290 "cannot free access to nvram interface\n");
1291 return -EBUSY;
1294 /* release HW lock: protect against other PFs in PF Direct Assignment */
1295 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
1296 return 0;
1299 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
1301 u32 val;
1303 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1305 /* enable both bits, even on read */
1306 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1307 (val | MCPR_NVM_ACCESS_ENABLE_EN |
1308 MCPR_NVM_ACCESS_ENABLE_WR_EN));
1311 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
1313 u32 val;
1315 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1317 /* disable both bits, even after read */
1318 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1319 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1320 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1323 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
1324 u32 cmd_flags)
1326 int count, i, rc;
1327 u32 val;
1329 /* build the command word */
1330 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1332 /* need to clear DONE bit separately */
1333 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1335 /* address of the NVRAM to read from */
1336 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
1337 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1339 /* issue a read command */
1340 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1342 /* adjust timeout for emulation/FPGA */
1343 count = BNX2X_NVRAM_TIMEOUT_COUNT;
1344 if (CHIP_REV_IS_SLOW(bp))
1345 count *= 100;
1347 /* wait for completion */
1348 *ret_val = 0;
1349 rc = -EBUSY;
1350 for (i = 0; i < count; i++) {
1351 udelay(5);
1352 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
1354 if (val & MCPR_NVM_COMMAND_DONE) {
1355 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
1356 /* we read nvram data in cpu order
1357 * but ethtool sees it as an array of bytes
1358 * converting to big-endian will do the work
1360 *ret_val = cpu_to_be32(val);
1361 rc = 0;
1362 break;
1365 if (rc == -EBUSY)
1366 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1367 "nvram read timeout expired\n");
1368 return rc;
1371 int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
1372 int buf_size)
1374 int rc;
1375 u32 cmd_flags;
1376 __be32 val;
1378 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1379 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1380 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
1381 offset, buf_size);
1382 return -EINVAL;
1385 if (offset + buf_size > bp->common.flash_size) {
1386 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1387 "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
1388 offset, buf_size, bp->common.flash_size);
1389 return -EINVAL;
1392 /* request access to nvram interface */
1393 rc = bnx2x_acquire_nvram_lock(bp);
1394 if (rc)
1395 return rc;
1397 /* enable access to nvram interface */
1398 bnx2x_enable_nvram_access(bp);
1400 /* read the first word(s) */
1401 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1402 while ((buf_size > sizeof(u32)) && (rc == 0)) {
1403 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
1404 memcpy(ret_buf, &val, 4);
1406 /* advance to the next dword */
1407 offset += sizeof(u32);
1408 ret_buf += sizeof(u32);
1409 buf_size -= sizeof(u32);
1410 cmd_flags = 0;
1413 if (rc == 0) {
1414 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1415 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
1416 memcpy(ret_buf, &val, 4);
1419 /* disable access to nvram interface */
1420 bnx2x_disable_nvram_access(bp);
1421 bnx2x_release_nvram_lock(bp);
1423 return rc;
1426 static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
1427 int buf_size)
1429 int rc;
1431 rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);
1433 if (!rc) {
1434 __be32 *be = (__be32 *)buf;
1436 while ((buf_size -= 4) >= 0)
1437 *buf++ = be32_to_cpu(*be++);
1440 return rc;
1443 static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
1445 int rc = 1;
1446 u16 pm = 0;
1447 struct net_device *dev = pci_get_drvdata(bp->pdev);
1449 if (bp->pdev->pm_cap)
1450 rc = pci_read_config_word(bp->pdev,
1451 bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
1453 if ((rc && !netif_running(dev)) ||
1454 (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
1455 return false;
1457 return true;
1460 static int bnx2x_get_eeprom(struct net_device *dev,
1461 struct ethtool_eeprom *eeprom, u8 *eebuf)
1463 struct bnx2x *bp = netdev_priv(dev);
1465 if (!bnx2x_is_nvm_accessible(bp)) {
1466 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1467 "cannot access eeprom when the interface is down\n");
1468 return -EAGAIN;
1471 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
1472 " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
1473 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
1474 eeprom->len, eeprom->len);
1476 /* parameters already validated in ethtool_get_eeprom */
1478 return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
1481 static int bnx2x_get_module_eeprom(struct net_device *dev,
1482 struct ethtool_eeprom *ee,
1483 u8 *data)
1485 struct bnx2x *bp = netdev_priv(dev);
1486 int rc = -EINVAL, phy_idx;
1487 u8 *user_data = data;
1488 unsigned int start_addr = ee->offset, xfer_size = 0;
1490 if (!bnx2x_is_nvm_accessible(bp)) {
1491 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1492 "cannot access eeprom when the interface is down\n");
1493 return -EAGAIN;
1496 phy_idx = bnx2x_get_cur_phy_idx(bp);
1498 /* Read A0 section */
1499 if (start_addr < ETH_MODULE_SFF_8079_LEN) {
1500 /* Limit transfer size to the A0 section boundary */
1501 if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
1502 xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
1503 else
1504 xfer_size = ee->len;
1505 bnx2x_acquire_phy_lock(bp);
1506 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1507 &bp->link_params,
1508 I2C_DEV_ADDR_A0,
1509 start_addr,
1510 xfer_size,
1511 user_data);
1512 bnx2x_release_phy_lock(bp);
1513 if (rc) {
1514 DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
1516 return -EINVAL;
1518 user_data += xfer_size;
1519 start_addr += xfer_size;
1522 /* Read A2 section */
1523 if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
1524 (start_addr < ETH_MODULE_SFF_8472_LEN)) {
1525 xfer_size = ee->len - xfer_size;
1526 /* Limit transfer size to the A2 section boundary */
1527 if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
1528 xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
1529 start_addr -= ETH_MODULE_SFF_8079_LEN;
1530 bnx2x_acquire_phy_lock(bp);
1531 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1532 &bp->link_params,
1533 I2C_DEV_ADDR_A2,
1534 start_addr,
1535 xfer_size,
1536 user_data);
1537 bnx2x_release_phy_lock(bp);
1538 if (rc) {
1539 DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
1540 return -EINVAL;
1543 return rc;
1546 static int bnx2x_get_module_info(struct net_device *dev,
1547 struct ethtool_modinfo *modinfo)
1549 struct bnx2x *bp = netdev_priv(dev);
1550 int phy_idx, rc;
1551 u8 sff8472_comp, diag_type;
1553 if (!bnx2x_is_nvm_accessible(bp)) {
1554 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1555 "cannot access eeprom when the interface is down\n");
1556 return -EAGAIN;
1558 phy_idx = bnx2x_get_cur_phy_idx(bp);
1559 bnx2x_acquire_phy_lock(bp);
1560 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1561 &bp->link_params,
1562 I2C_DEV_ADDR_A0,
1563 SFP_EEPROM_SFF_8472_COMP_ADDR,
1564 SFP_EEPROM_SFF_8472_COMP_SIZE,
1565 &sff8472_comp);
1566 bnx2x_release_phy_lock(bp);
1567 if (rc) {
1568 DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
1569 return -EINVAL;
1572 bnx2x_acquire_phy_lock(bp);
1573 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1574 &bp->link_params,
1575 I2C_DEV_ADDR_A0,
1576 SFP_EEPROM_DIAG_TYPE_ADDR,
1577 SFP_EEPROM_DIAG_TYPE_SIZE,
1578 &diag_type);
1579 bnx2x_release_phy_lock(bp);
1580 if (rc) {
1581 DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
1582 return -EINVAL;
1585 if (!sff8472_comp ||
1586 (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ) ||
1587 !(diag_type & SFP_EEPROM_DDM_IMPLEMENTED)) {
1588 modinfo->type = ETH_MODULE_SFF_8079;
1589 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1590 } else {
1591 modinfo->type = ETH_MODULE_SFF_8472;
1592 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1594 return 0;
1597 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
1598 u32 cmd_flags)
1600 int count, i, rc;
1602 /* build the command word */
1603 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
1605 /* need to clear DONE bit separately */
1606 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1608 /* write the data */
1609 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
1611 /* address of the NVRAM to write to */
1612 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
1613 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1615 /* issue the write command */
1616 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1618 /* adjust timeout for emulation/FPGA */
1619 count = BNX2X_NVRAM_TIMEOUT_COUNT;
1620 if (CHIP_REV_IS_SLOW(bp))
1621 count *= 100;
1623 /* wait for completion */
1624 rc = -EBUSY;
1625 for (i = 0; i < count; i++) {
1626 udelay(5);
1627 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
1628 if (val & MCPR_NVM_COMMAND_DONE) {
1629 rc = 0;
1630 break;
1634 if (rc == -EBUSY)
1635 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1636 "nvram write timeout expired\n");
1637 return rc;
/* Bit shift needed to place a byte at 'offset' within its aligned dword. */
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
1642 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
1643 int buf_size)
1645 int rc;
1646 u32 cmd_flags, align_offset, val;
1647 __be32 val_be;
1649 if (offset + buf_size > bp->common.flash_size) {
1650 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1651 "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
1652 offset, buf_size, bp->common.flash_size);
1653 return -EINVAL;
1656 /* request access to nvram interface */
1657 rc = bnx2x_acquire_nvram_lock(bp);
1658 if (rc)
1659 return rc;
1661 /* enable access to nvram interface */
1662 bnx2x_enable_nvram_access(bp);
1664 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1665 align_offset = (offset & ~0x03);
1666 rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);
1668 if (rc == 0) {
1669 /* nvram data is returned as an array of bytes
1670 * convert it back to cpu order
1672 val = be32_to_cpu(val_be);
1674 val &= ~le32_to_cpu((__force __le32)
1675 (0xff << BYTE_OFFSET(offset)));
1676 val |= le32_to_cpu((__force __le32)
1677 (*data_buf << BYTE_OFFSET(offset)));
1679 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
1680 cmd_flags);
1683 /* disable access to nvram interface */
1684 bnx2x_disable_nvram_access(bp);
1685 bnx2x_release_nvram_lock(bp);
1687 return rc;
1690 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
1691 int buf_size)
1693 int rc;
1694 u32 cmd_flags;
1695 u32 val;
1696 u32 written_so_far;
1698 if (buf_size == 1) /* ethtool */
1699 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
1701 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1702 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1703 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
1704 offset, buf_size);
1705 return -EINVAL;
1708 if (offset + buf_size > bp->common.flash_size) {
1709 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1710 "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
1711 offset, buf_size, bp->common.flash_size);
1712 return -EINVAL;
1715 /* request access to nvram interface */
1716 rc = bnx2x_acquire_nvram_lock(bp);
1717 if (rc)
1718 return rc;
1720 /* enable access to nvram interface */
1721 bnx2x_enable_nvram_access(bp);
1723 written_so_far = 0;
1724 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1725 while ((written_so_far < buf_size) && (rc == 0)) {
1726 if (written_so_far == (buf_size - sizeof(u32)))
1727 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1728 else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
1729 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1730 else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
1731 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1733 memcpy(&val, data_buf, 4);
1735 /* Notice unlike bnx2x_nvram_read_dword() this will not
1736 * change val using be32_to_cpu(), which causes data to flip
1737 * if the eeprom is read and then written back. This is due
1738 * to tools utilizing this functionality that would break
1739 * if this would be resolved.
1741 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
1743 /* advance to the next dword */
1744 offset += sizeof(u32);
1745 data_buf += sizeof(u32);
1746 written_so_far += sizeof(u32);
1748 /* At end of each 4Kb page, release nvram lock to allow MFW
1749 * chance to take it for its own use.
1751 if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
1752 (written_so_far < buf_size)) {
1753 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1754 "Releasing NVM lock after offset 0x%x\n",
1755 (u32)(offset - sizeof(u32)));
1756 bnx2x_release_nvram_lock(bp);
1757 usleep_range(1000, 2000);
1758 rc = bnx2x_acquire_nvram_lock(bp);
1759 if (rc)
1760 return rc;
1763 cmd_flags = 0;
1766 /* disable access to nvram interface */
1767 bnx2x_disable_nvram_access(bp);
1768 bnx2x_release_nvram_lock(bp);
1770 return rc;
1773 static int bnx2x_set_eeprom(struct net_device *dev,
1774 struct ethtool_eeprom *eeprom, u8 *eebuf)
1776 struct bnx2x *bp = netdev_priv(dev);
1777 int port = BP_PORT(bp);
1778 int rc = 0;
1779 u32 ext_phy_config;
1781 if (!bnx2x_is_nvm_accessible(bp)) {
1782 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1783 "cannot access eeprom when the interface is down\n");
1784 return -EAGAIN;
1787 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
1788 " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
1789 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
1790 eeprom->len, eeprom->len);
1792 /* parameters already validated in ethtool_set_eeprom */
1794 /* PHY eeprom can be accessed only by the PMF */
1795 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
1796 !bp->port.pmf) {
1797 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1798 "wrong magic or interface is not pmf\n");
1799 return -EINVAL;
1802 ext_phy_config =
1803 SHMEM_RD(bp,
1804 dev_info.port_hw_config[port].external_phy_config);
1806 if (eeprom->magic == 0x50485950) {
1807 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
1808 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1810 bnx2x_acquire_phy_lock(bp);
1811 rc |= bnx2x_link_reset(&bp->link_params,
1812 &bp->link_vars, 0);
1813 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
1814 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
1815 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
1816 MISC_REGISTERS_GPIO_HIGH, port);
1817 bnx2x_release_phy_lock(bp);
1818 bnx2x_link_report(bp);
1820 } else if (eeprom->magic == 0x50485952) {
1821 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
1822 if (bp->state == BNX2X_STATE_OPEN) {
1823 bnx2x_acquire_phy_lock(bp);
1824 rc |= bnx2x_link_reset(&bp->link_params,
1825 &bp->link_vars, 1);
1827 rc |= bnx2x_phy_init(&bp->link_params,
1828 &bp->link_vars);
1829 bnx2x_release_phy_lock(bp);
1830 bnx2x_calc_fc_adv(bp);
1832 } else if (eeprom->magic == 0x53985943) {
1833 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
1834 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
1835 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
1837 /* DSP Remove Download Mode */
1838 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
1839 MISC_REGISTERS_GPIO_LOW, port);
1841 bnx2x_acquire_phy_lock(bp);
1843 bnx2x_sfx7101_sp_sw_reset(bp,
1844 &bp->link_params.phy[EXT_PHY1]);
1846 /* wait 0.5 sec to allow it to run */
1847 msleep(500);
1848 bnx2x_ext_phy_hw_reset(bp, port);
1849 msleep(500);
1850 bnx2x_release_phy_lock(bp);
1852 } else
1853 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
1855 return rc;
1858 static int bnx2x_get_coalesce(struct net_device *dev,
1859 struct ethtool_coalesce *coal)
1861 struct bnx2x *bp = netdev_priv(dev);
1863 memset(coal, 0, sizeof(struct ethtool_coalesce));
1865 coal->rx_coalesce_usecs = bp->rx_ticks;
1866 coal->tx_coalesce_usecs = bp->tx_ticks;
1868 return 0;
1871 static int bnx2x_set_coalesce(struct net_device *dev,
1872 struct ethtool_coalesce *coal)
1874 struct bnx2x *bp = netdev_priv(dev);
1876 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
1877 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
1878 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
1880 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
1881 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
1882 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
1884 if (netif_running(dev))
1885 bnx2x_update_coalesce(bp);
1887 return 0;
1890 static void bnx2x_get_ringparam(struct net_device *dev,
1891 struct ethtool_ringparam *ering)
1893 struct bnx2x *bp = netdev_priv(dev);
1895 ering->rx_max_pending = MAX_RX_AVAIL;
1897 /* If size isn't already set, we give an estimation of the number
1898 * of buffers we'll have. We're neglecting some possible conditions
1899 * [we couldn't know for certain at this point if number of queues
1900 * might shrink] but the number would be correct for the likely
1901 * scenario.
1903 if (bp->rx_ring_size)
1904 ering->rx_pending = bp->rx_ring_size;
1905 else if (BNX2X_NUM_RX_QUEUES(bp))
1906 ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp);
1907 else
1908 ering->rx_pending = MAX_RX_AVAIL;
1910 ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
1911 ering->tx_pending = bp->tx_ring_size;
1914 static int bnx2x_set_ringparam(struct net_device *dev,
1915 struct ethtool_ringparam *ering)
1917 struct bnx2x *bp = netdev_priv(dev);
1919 DP(BNX2X_MSG_ETHTOOL,
1920 "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
1921 ering->rx_pending, ering->tx_pending);
1923 if (pci_num_vf(bp->pdev)) {
1924 DP(BNX2X_MSG_IOV,
1925 "VFs are enabled, can not change ring parameters\n");
1926 return -EPERM;
1929 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1930 DP(BNX2X_MSG_ETHTOOL,
1931 "Handling parity error recovery. Try again later\n");
1932 return -EAGAIN;
1935 if ((ering->rx_pending > MAX_RX_AVAIL) ||
1936 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
1937 MIN_RX_SIZE_TPA)) ||
1938 (ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL)) ||
1939 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
1940 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
1941 return -EINVAL;
1944 bp->rx_ring_size = ering->rx_pending;
1945 bp->tx_ring_size = ering->tx_pending;
1947 return bnx2x_reload_if_running(dev);
1950 static void bnx2x_get_pauseparam(struct net_device *dev,
1951 struct ethtool_pauseparam *epause)
1953 struct bnx2x *bp = netdev_priv(dev);
1954 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
1955 int cfg_reg;
1957 epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
1958 BNX2X_FLOW_CTRL_AUTO);
1960 if (!epause->autoneg)
1961 cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx];
1962 else
1963 cfg_reg = bp->link_params.req_fc_auto_adv;
1965 epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) ==
1966 BNX2X_FLOW_CTRL_RX);
1967 epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) ==
1968 BNX2X_FLOW_CTRL_TX);
1970 DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
1971 " autoneg %d rx_pause %d tx_pause %d\n",
1972 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
1975 static int bnx2x_set_pauseparam(struct net_device *dev,
1976 struct ethtool_pauseparam *epause)
1978 struct bnx2x *bp = netdev_priv(dev);
1979 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1980 if (IS_MF(bp))
1981 return 0;
1983 DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
1984 " autoneg %d rx_pause %d tx_pause %d\n",
1985 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
1987 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
1989 if (epause->rx_pause)
1990 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
1992 if (epause->tx_pause)
1993 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
1995 if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
1996 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
1998 if (epause->autoneg) {
1999 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
2000 DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n");
2001 return -EINVAL;
2004 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
2005 bp->link_params.req_flow_ctrl[cfg_idx] =
2006 BNX2X_FLOW_CTRL_AUTO;
2008 bp->link_params.req_fc_auto_adv = 0;
2009 if (epause->rx_pause)
2010 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;
2012 if (epause->tx_pause)
2013 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;
2015 if (!bp->link_params.req_fc_auto_adv)
2016 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
2019 DP(BNX2X_MSG_ETHTOOL,
2020 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
2022 if (netif_running(dev)) {
2023 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2024 bnx2x_force_link_reset(bp);
2025 bnx2x_link_set(bp);
2028 return 0;
2031 static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
2032 "register_test (offline) ",
2033 "memory_test (offline) ",
2034 "int_loopback_test (offline)",
2035 "ext_loopback_test (offline)",
2036 "nvram_test (online) ",
2037 "interrupt_test (online) ",
2038 "link_test (online) "
/* Indices of the ethtool private flags; _LEN is the flag count. */
enum {
	BNX2X_PRI_FLAG_ISCSI,
	BNX2X_PRI_FLAG_FCOE,
	BNX2X_PRI_FLAG_STORAGE,
	BNX2X_PRI_FLAG_LEN,
};
2048 static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
2049 "iSCSI offload support",
2050 "FCoE offload support",
2051 "Storage only interface"
2054 static u32 bnx2x_eee_to_adv(u32 eee_adv)
2056 u32 modes = 0;
2058 if (eee_adv & SHMEM_EEE_100M_ADV)
2059 modes |= ADVERTISED_100baseT_Full;
2060 if (eee_adv & SHMEM_EEE_1G_ADV)
2061 modes |= ADVERTISED_1000baseT_Full;
2062 if (eee_adv & SHMEM_EEE_10G_ADV)
2063 modes |= ADVERTISED_10000baseT_Full;
2065 return modes;
2068 static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
2070 u32 eee_adv = 0;
2071 if (modes & ADVERTISED_100baseT_Full)
2072 eee_adv |= SHMEM_EEE_100M_ADV;
2073 if (modes & ADVERTISED_1000baseT_Full)
2074 eee_adv |= SHMEM_EEE_1G_ADV;
2075 if (modes & ADVERTISED_10000baseT_Full)
2076 eee_adv |= SHMEM_EEE_10G_ADV;
2078 return eee_adv << shift;
2081 static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
2083 struct bnx2x *bp = netdev_priv(dev);
2084 u32 eee_cfg;
2086 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
2087 DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
2088 return -EOPNOTSUPP;
2091 eee_cfg = bp->link_vars.eee_status;
2093 edata->supported =
2094 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
2095 SHMEM_EEE_SUPPORTED_SHIFT);
2097 edata->advertised =
2098 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
2099 SHMEM_EEE_ADV_STATUS_SHIFT);
2100 edata->lp_advertised =
2101 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
2102 SHMEM_EEE_LP_ADV_STATUS_SHIFT);
2104 /* SHMEM value is in 16u units --> Convert to 1u units. */
2105 edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
2107 edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
2108 edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
2109 edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
2111 return 0;
2114 static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
2116 struct bnx2x *bp = netdev_priv(dev);
2117 u32 eee_cfg;
2118 u32 advertised;
2120 if (IS_MF(bp))
2121 return 0;
2123 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
2124 DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
2125 return -EOPNOTSUPP;
2128 eee_cfg = bp->link_vars.eee_status;
2130 if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
2131 DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
2132 return -EOPNOTSUPP;
2135 advertised = bnx2x_adv_to_eee(edata->advertised,
2136 SHMEM_EEE_ADV_STATUS_SHIFT);
2137 if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
2138 DP(BNX2X_MSG_ETHTOOL,
2139 "Direct manipulation of EEE advertisement is not supported\n");
2140 return -EINVAL;
2143 if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
2144 DP(BNX2X_MSG_ETHTOOL,
2145 "Maximal Tx Lpi timer supported is %x(u)\n",
2146 EEE_MODE_TIMER_MASK);
2147 return -EINVAL;
2149 if (edata->tx_lpi_enabled &&
2150 (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
2151 DP(BNX2X_MSG_ETHTOOL,
2152 "Minimal Tx Lpi timer supported is %d(u)\n",
2153 EEE_MODE_NVRAM_AGGRESSIVE_TIME);
2154 return -EINVAL;
2157 /* All is well; Apply changes*/
2158 if (edata->eee_enabled)
2159 bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
2160 else
2161 bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
2163 if (edata->tx_lpi_enabled)
2164 bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
2165 else
2166 bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
2168 bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
2169 bp->link_params.eee_mode |= (edata->tx_lpi_timer &
2170 EEE_MODE_TIMER_MASK) |
2171 EEE_MODE_OVERRIDE_NVRAM |
2172 EEE_MODE_OUTPUT_TIME;
2174 /* Restart link to propagate changes */
2175 if (netif_running(dev)) {
2176 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2177 bnx2x_force_link_reset(bp);
2178 bnx2x_link_set(bp);
2181 return 0;
/* Per-chip-revision bit offsets and masks, used by the register self-test
 * table below to mark which chips each register applies to.
 */
enum {
	BNX2X_CHIP_E1_OFST = 0,
	BNX2X_CHIP_E1H_OFST,
	BNX2X_CHIP_E2_OFST,
	BNX2X_CHIP_E3_OFST,
	BNX2X_CHIP_E3B0_OFST,
	BNX2X_CHIP_MAX_OFST
};

#define BNX2X_CHIP_MASK_E1	(1 << BNX2X_CHIP_E1_OFST)
#define BNX2X_CHIP_MASK_E1H	(1 << BNX2X_CHIP_E1H_OFST)
#define BNX2X_CHIP_MASK_E2	(1 << BNX2X_CHIP_E2_OFST)
#define BNX2X_CHIP_MASK_E3	(1 << BNX2X_CHIP_E3_OFST)
#define BNX2X_CHIP_MASK_E3B0	(1 << BNX2X_CHIP_E3B0_OFST)

#define BNX2X_CHIP_MASK_ALL	((1 << BNX2X_CHIP_MAX_OFST) - 1)
#define BNX2X_CHIP_MASK_E1X	(BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
/* Register self test: for each register applicable to the current chip,
 * write 0x00000000 and then 0xffffffff (masked), read back, and verify the
 * writable bits took the value. The original register value is restored
 * after each probe. Returns 0 on success, -ENODEV on first mismatch or if
 * the interface is down.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0, hw;
	int port = BP_PORT(bp);
	/* Table of testable registers: chip-applicability mask, base offset,
	 * per-port stride (offset1), and mask of writable bits.
	 */
	static const struct {
		u32 hw;
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BNX2X_CHIP_MASK_ALL,
			BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
		{ BNX2X_CHIP_MASK_ALL,
			DORQ_REG_DB_ADDR0, 4, 0xffffffff },
		{ BNX2X_CHIP_MASK_E1X,
			HC_REG_AGG_INT_0, 4, 0x000003ff },
		{ BNX2X_CHIP_MASK_ALL,
			PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
			PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
		{ BNX2X_CHIP_MASK_E3B0,
			PBF_REG_INIT_CRD_Q0, 4, 0x000007ff },
		{ BNX2X_CHIP_MASK_ALL,
			PRS_REG_CID_PORT_0, 4, 0x00ffffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
/* 10 */	{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
		{ BNX2X_CHIP_MASK_ALL,
			QM_REG_CONNNUM_0, 4, 0x000fffff },
		{ BNX2X_CHIP_MASK_ALL,
			TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
		{ BNX2X_CHIP_MASK_ALL,
			SRC_REG_KEYRSS0_0, 40, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			SRC_REG_KEYRSS0_7, 40, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
		{ BNX2X_CHIP_MASK_ALL,
			XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
/* 20 */	{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
/* 30 */	{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },

		/* sentinel: offset0 == 0xffffffff terminates the table */
		{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
	};

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return rc;
	}

	/* Resolve the applicability bit of the chip we are running on */
	if (CHIP_IS_E1(bp))
		hw = BNX2X_CHIP_MASK_E1;
	else if (CHIP_IS_E1H(bp))
		hw = BNX2X_CHIP_MASK_E1H;
	else if (CHIP_IS_E2(bp))
		hw = BNX2X_CHIP_MASK_E2;
	else if (CHIP_IS_E3B0(bp))
		hw = BNX2X_CHIP_MASK_E3B0;
	else /* e3 A0 */
		hw = BNX2X_CHIP_MASK_E3;

	/* Repeat the test twice:
	 * First by writing 0x00000000, second by writing 0xffffffff
	 */
	for (idx = 0; idx < 2; idx++) {
		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			if (!(hw & reg_tbl[i].hw))
				continue;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val & mask);

			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
/* Memory self test: read-sweep the internal memories and verify that no
 * unexpected parity error bits are raised, checking parity status both
 * before and after the sweep. Returns 0 on success, -ENODEV otherwise.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val, index;
	/* Memories to sweep: base offset and size in dwords */
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};

	/* Parity status registers with a per-chip mask of bits that may
	 * legitimately be set (indexed by BNX2X_CHIP_*_OFST).
	 */
	static const struct {
		char *name;
		u32 offset;
		u32 hw_mask[BNX2X_CHIP_MAX_OFST];
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS,
			{0x3ffc0, 0, 0, 0} },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS,
			{0x2, 0x2, 0, 0} },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
			{0, 0, 0, 0} },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS,
			{0x3ffc0, 0, 0, 0} },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS,
			{0x3ffc0, 0, 0, 0} },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS,
			{0x3ffc1, 0, 0, 0} },

		{ NULL, 0xffffffff, {0, 0, 0, 0} }
	};

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return rc;
	}

	/* Select the per-chip column of hw_mask[] */
	if (CHIP_IS_E1(bp))
		index = BNX2X_CHIP_E1_OFST;
	else if (CHIP_IS_E1H(bp))
		index = BNX2X_CHIP_E1H_OFST;
	else if (CHIP_IS_E2(bp))
		index = BNX2X_CHIP_E2_OFST;
	else /* e3 */
		index = BNX2X_CHIP_E3_OFST;

	/* pre-Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if (val & ~(prty_tbl[i].hw_mask[index])) {
			DP(BNX2X_MSG_ETHTOOL,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if (val & ~(prty_tbl[i].hw_mask[index])) {
			DP(BNX2X_MSG_ETHTOOL,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
2443 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
2445 int cnt = 1400;
2447 if (link_up) {
2448 while (bnx2x_link_test(bp, is_serdes) && cnt--)
2449 msleep(20);
2451 if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
2452 DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
2454 cnt = 1400;
2455 while (!bp->link_vars.link_up && cnt--)
2456 msleep(20);
2458 if (cnt <= 0 && !bp->link_vars.link_up)
2459 DP(BNX2X_MSG_ETHTOOL,
2460 "Timeout waiting for link init\n");
/* Run a single-packet loopback test on queue 0: build a test frame, post
 * it on the Tx ring with a manually-constructed BD pair, ring the doorbell,
 * then verify the frame completed on Tx and arrived intact on Rx.
 * @loopback_mode selects PHY, MAC or external loopback. Returns 0 when the
 * packet made the round trip with correct payload, negative errno / -ENODEV
 * otherwise. Always clears link_params.loopback_mode on exit.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags, cqe_fp_type;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;
	u8 *data;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
						       txdata->txq_index);

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
			DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
			return -EINVAL;
		}
		break;
	case BNX2X_MAC_LOOPBACK:
		if (CHIP_IS_E3(bp)) {
			int cfg_idx = bnx2x_get_link_cfg_idx(bp);
			/* E3 has two MACs: pick XMAC for 10G/20G-capable
			 * ports, UMAC otherwise
			 */
			if (bp->port.supported[cfg_idx] &
			    (SUPPORTED_10000baseT_Full |
			     SUPPORTED_20000baseMLD2_Full |
			     SUPPORTED_20000baseKR2_Full))
				bp->link_params.loopback_mode = LOOPBACK_XMAC;
			else
				bp->link_params.loopback_mode = LOOPBACK_UMAC;
		} else
			bp->link_params.loopback_mode = LOOPBACK_BMAC;

		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	case BNX2X_EXT_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Can't configure external loopback\n");
			return -EINVAL;
		}
		break;
	default:
		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
		return -EINVAL;
	}

	/* prepare the loopback packet: our MAC, zero dst, 0x77 filler in the
	 * header remainder and an i&0xff byte pattern in the payload
	 */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
	if (!skb) {
		DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n");
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	eth_zero_addr(packet + ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		rc = -ENOMEM;
		dev_kfree_skb(skb);
		DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n");
		goto test_loopback_exit;
	}

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	netdev_tx_sent_queue(txq, skb->len);

	pkt_prod = txdata->tx_pkt_prod++;
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* Build the start BD by hand (bypassing the regular xmit path) */
	bd_prod = TX_BD(txdata->tx_bd_prod);
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data,
		 ETH_TX_START_BD_HDR_NBDS,
		 1);
	SET_FLAG(tx_start_bd->general_data,
		 ETH_TX_START_BD_PARSE_NBDS,
		 0);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* Parsing BD layout differs between E1x and E2/E3 chips */
	if (CHIP_IS_E1x(bp)) {
		u16 global_data = 0;
		struct eth_tx_parse_bd_e1x *pbd_e1x =
			&txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		SET_FLAG(global_data,
			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
		pbd_e1x->global_data = cpu_to_le16(global_data);
	} else {
		u32 parsing_data = 0;
		struct eth_tx_parse_bd_e2 *pbd_e2 =
			&txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		SET_FLAG(parsing_data,
			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
		pbd_e2->parsing_data = cpu_to_le32(parsing_data);
	}
	wmb();

	txdata->tx_db.data.prod += 2;
	/* make sure descriptor update is observed by the HW */
	wmb();
	DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();
	barrier();

	num_pkts++;
	txdata->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	/* Verify the Tx completion arrived */
	tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* Unlike HC IGU won't generate an interrupt for status block
	 * updates that have been performed while interrupts were
	 * disabled.
	 */
	if (bp->common.int_block == INT_BLOCK_IGU) {
		/* Disable local BHes to prevent a dead-lock situation between
		 * sch_direct_xmit() and bnx2x_run_loopback() (calling
		 * bnx2x_tx_int()), as both are taking netif_tx_lock().
		 */
		local_bh_disable();
		bnx2x_tx_int(bp, txdata);
		local_bh_enable();
	}

	/* Verify the frame looped back onto the Rx ring */
	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
	if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* Compare the received payload against the generated pattern */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	dma_sync_single_for_cpu(&bp->pdev->dev,
				dma_unmap_addr(rx_buf, mapping),
				fp_rx->rx_buf_size, DMA_FROM_DEVICE);
	data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* Consume the Rx BD/CQE we inspected */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
/* Internal-loopback self test: run both the PHY and MAC loopback variants
 * with the fastpath stopped and the PHY lock held. Returns a bitmask of
 * BNX2X_*_LOOPBACK_FAILED flags (0 == all passed); skipped silently when
 * there is no MCP.
 */
static int bnx2x_test_loopback(struct bnx2x *bp)
{
	int rc = 0, res;

	if (BP_NOMCP(bp))
		return rc;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	/* Quiesce the datapath and serialize PHY access for the test */
	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
	if (res) {
		DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
	if (res) {
		DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
/* External-loopback self test: reload the NIC in external-loopback mode,
 * wait for link, run a packet through, and restart the netif. The caller
 * (bnx2x_self_test) is responsible for reloading the NIC back to normal
 * mode afterwards. Returns 0 on success or a negative/failure code.
 */
static int bnx2x_test_ext_loopback(struct bnx2x *bp)
{
	int rc;
	u8 is_serdes =
		(bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;

	if (BP_NOMCP(bp))
		return -ENODEV;

	if (!netif_running(bp->dev))
		return BNX2X_EXT_LOOPBACK_FAILED;

	/* Reload the device with the external-loopback configuration */
	bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
	rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
	if (rc) {
		DP(BNX2X_MSG_ETHTOOL,
		   "Can't perform self-test, nic_load (for external lb) failed\n");
		return -ENODEV;
	}

	/* wait until link state is restored */
	bnx2x_wait_for_link(bp, 1, is_serdes);

	bnx2x_netif_stop(bp, 1);

	rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
	if (rc)
		DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);

	bnx2x_netif_start(bp);

	return rc;
}
/* One entry of the NVRAM image directory: where the image is loaded in
 * SRAM, its attributes (type + length), and where it lives in NVRAM.
 */
struct code_entry {
	u32 sram_start_addr;
	u32 code_attribute;
#define CODE_IMAGE_TYPE_MASK			0xf0800003
#define CODE_IMAGE_VNTAG_PROFILES_DATA		0xd0000003
#define CODE_IMAGE_LENGTH_MASK			0x007ffffc
#define CODE_IMAGE_TYPE_EXTENDED_DIR		0xe0000000
	u32 nvm_start_addr;
};

#define CODE_ENTRY_MAX			16
#define CODE_ENTRY_EXTENDED_DIR_IDX	15
#define MAX_IMAGES_IN_EXTENDED_DIR	64
#define NVRAM_DIR_OFFSET		0x14

/* An extended directory exists when the last primary entry carries the
 * extended-dir type and a non-zero length.
 */
#define EXTENDED_DIR_EXISTS(code)					  \
	((code & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \
	 (code & CODE_IMAGE_LENGTH_MASK) != 0)

/* Expected CRC32 residue of a region whose trailing CRC is included */
#define CRC32_RESIDUAL			0xdebb20e3
#define CRC_BUFF_SIZE			256
2754 static int bnx2x_nvram_crc(struct bnx2x *bp,
2755 int offset,
2756 int size,
2757 u8 *buff)
2759 u32 crc = ~0;
2760 int rc = 0, done = 0;
2762 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2763 "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size);
2765 while (done < size) {
2766 int count = min_t(int, size - done, CRC_BUFF_SIZE);
2768 rc = bnx2x_nvram_read(bp, offset + done, buff, count);
2770 if (rc)
2771 return rc;
2773 crc = crc32_le(crc, buff, count);
2774 done += count;
2777 if (crc != CRC32_RESIDUAL)
2778 rc = -EINVAL;
2780 return rc;
2783 static int bnx2x_test_nvram_dir(struct bnx2x *bp,
2784 struct code_entry *entry,
2785 u8 *buff)
2787 size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK;
2788 u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK;
2789 int rc;
2791 /* Zero-length images and AFEX profiles do not have CRC */
2792 if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA)
2793 return 0;
2795 rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff);
2796 if (rc)
2797 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2798 "image %x has failed crc test (rc %d)\n", type, rc);
2800 return rc;
2803 static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff)
2805 int rc;
2806 struct code_entry entry;
2808 rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry));
2809 if (rc)
2810 return rc;
2812 return bnx2x_test_nvram_dir(bp, &entry, buff);
2815 static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff)
2817 u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET;
2818 struct code_entry entry;
2819 int i;
2821 rc = bnx2x_nvram_read32(bp,
2822 dir_offset +
2823 sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX,
2824 (u32 *)&entry, sizeof(entry));
2825 if (rc)
2826 return rc;
2828 if (!EXTENDED_DIR_EXISTS(entry.code_attribute))
2829 return 0;
2831 rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr,
2832 &cnt, sizeof(u32));
2833 if (rc)
2834 return rc;
2836 dir_offset = entry.nvm_start_addr + 8;
2838 for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) {
2839 rc = bnx2x_test_dir_entry(bp, dir_offset +
2840 sizeof(struct code_entry) * i,
2841 buff);
2842 if (rc)
2843 return rc;
2846 return 0;
2849 static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff)
2851 u32 rc, dir_offset = NVRAM_DIR_OFFSET;
2852 int i;
2854 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n");
2856 for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) {
2857 rc = bnx2x_test_dir_entry(bp, dir_offset +
2858 sizeof(struct code_entry) * i,
2859 buff);
2860 if (rc)
2861 return rc;
2864 return bnx2x_test_nvram_ext_dirs(bp, buff);
/* A fixed NVRAM region to CRC-check: byte offset and length */
struct crc_pair {
	int offset;
	int size;
};
2872 static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
2873 const struct crc_pair *nvram_tbl, u8 *buf)
2875 int i;
2877 for (i = 0; nvram_tbl[i].size; i++) {
2878 int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset,
2879 nvram_tbl[i].size, buf);
2880 if (rc) {
2881 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2882 "nvram_tbl[%d] has failed crc test (rc %d)\n",
2883 i, rc);
2884 return rc;
2888 return 0;
/* NVRAM self test: verify the magic value, CRC-check the fixed per-port
 * configuration regions and every image listed in the NVRAM directories.
 * Returns 0 on success (or when there is no MCP), negative errno otherwise.
 */
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	/* Fixed port-0 regions (offset/size); zero entry terminates */
	static const struct crc_pair nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x708,  0x70 }, /* manuf_key_info */
		{     0,     0 }
	};
	/* Port-1 regions; checked only when port 1 is not hidden */
	static const struct crc_pair nvram_tbl2[] = {
		{ 0x7e8, 0x350 }, /* manuf_info2 */
		{ 0xb38,  0xf0 }, /* feature_info */
		{     0,     0 }
	};

	u8 *buf;
	int rc;
	u32 magic;

	if (BP_NOMCP(bp))
		return 0;

	buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
	if (!buf) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
		rc = -ENOMEM;
		goto test_nvram_exit;
	}

	rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
	if (rc) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	if (magic != 0x669955aa) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "wrong magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
	rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
	if (rc)
		goto test_nvram_exit;

	if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
		u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
			   SHARED_HW_CFG_HIDE_PORT1;

		if (!hide) {
			DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
			   "Port 1 CRC test-set\n");
			rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
			if (rc)
				goto test_nvram_exit;
		}
	}

	rc = bnx2x_test_nvram_dirs(bp, buf);

test_nvram_exit:
	kfree(buf);	/* kfree(NULL) is a no-op */
	return rc;
}
/* Send an EMPTY ramrod on the first queue and wait for its completion;
 * a successful round trip proves slowpath interrupt delivery works.
 * Returns 0 on success, -ENODEV when the interface is down, or the queue
 * state-change error.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct bnx2x_queue_state_params params = {NULL};

	if (!netif_running(bp->dev)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -ENODEV;
	}

	params.q_obj = &bp->sp_objs->q_obj;
	params.cmd = BNX2X_Q_CMD_EMPTY;

	/* Block until the ramrod completes */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return bnx2x_queue_state_change(bp, &params);
}
/* ethtool .self_test handler. Results go into @buf; in MF mode only the
 * online tests run and use result slots 0..2, otherwise the offline tests
 * (registers/memory/loopback) use slots 0..3 and the online tests slots
 * 4..6. Offline testing reloads the NIC in diagnostic mode and restores it
 * afterwards.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 is_serdes, link_up;
	int rc, cnt = 0;

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV,
		   "VFs are enabled, can not perform self test\n");
		return;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		netdev_err(bp->dev,
			   "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	DP(BNX2X_MSG_ETHTOOL,
	   "Self-test command parameters: offline = %d, external_lb = %d\n",
	   (etest->flags & ETH_TEST_FL_OFFLINE),
	   (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));

	if (bnx2x_test_nvram(bp) != 0) {
		if (!IS_MF(bp))
			buf[4] = 1;
		else
			buf[0] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (!netif_running(dev)) {
		DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
		return;
	}

	is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
	link_up = bp->link_vars.link_up;
	/* offline tests are not supported in MF mode */
	if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
		int port = BP_PORT(bp);
		u32 val;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* Reload the NIC in diagnostic mode for the offline tests */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
		rc = bnx2x_nic_load(bp, LOAD_DIAG);
		if (rc) {
			etest->flags |= ETH_TEST_FL_FAILED;
			DP(BNX2X_MSG_ETHTOOL,
			   "Can't perform self-test, nic_load (for offline) failed\n");
			return;
		}

		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, 1, is_serdes);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

		buf[2] = bnx2x_test_loopback(bp); /* internal LB */
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
			buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
			if (buf[3] != 0)
				etest->flags |= ETH_TEST_FL_FAILED;
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
		}

		/* Return to normal operating mode */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
		if (rc) {
			etest->flags |= ETH_TEST_FL_FAILED;
			DP(BNX2X_MSG_ETHTOOL,
			   "Can't perform self-test, nic_load (for online) failed\n");
			return;
		}
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up, is_serdes);
	}

	if (bnx2x_test_intr(bp) != 0) {
		if (!IS_MF(bp))
			buf[5] = 1;
		else
			buf[1] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	/* Link test: poll up to ~2 seconds for the link to pass */
	if (link_up) {
		cnt = 100;
		while (bnx2x_link_test(bp, is_serdes) && --cnt)
			msleep(20);

		if (!cnt) {
			if (!IS_MF(bp))
				buf[6] = 1;
			else
				buf[2] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}
/* True iff statistic i in bnx2x_stats_arr is a per-port statistic */
#define IS_PORT_STAT(i)		(bnx2x_stats_arr[i].is_port_stat)
/* VFs have no access to port statistics, so hide them */
#define HIDE_PORT_STAT(bp)	IS_VF(bp)
/* ethtool statistics are displayed for all regular ethernet queues and the
 * fcoe L2 queue if not disabled
 */
static int bnx2x_num_stat_queues(struct bnx2x *bp)
{
	return BNX2X_NUM_ETH_QUEUES(bp);
}
3113 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
3115 struct bnx2x *bp = netdev_priv(dev);
3116 int i, num_strings = 0;
3118 switch (stringset) {
3119 case ETH_SS_STATS:
3120 if (is_multi(bp)) {
3121 num_strings = bnx2x_num_stat_queues(bp) *
3122 BNX2X_NUM_Q_STATS;
3123 } else
3124 num_strings = 0;
3125 if (HIDE_PORT_STAT(bp)) {
3126 for (i = 0; i < BNX2X_NUM_STATS; i++)
3127 if (!IS_PORT_STAT(i))
3128 num_strings++;
3129 } else
3130 num_strings += BNX2X_NUM_STATS;
3132 return num_strings;
3134 case ETH_SS_TEST:
3135 return BNX2X_NUM_TESTS(bp);
3137 case ETH_SS_PRIV_FLAGS:
3138 return BNX2X_PRI_FLAG_LEN;
3140 default:
3141 return -EINVAL;
3145 static u32 bnx2x_get_private_flags(struct net_device *dev)
3147 struct bnx2x *bp = netdev_priv(dev);
3148 u32 flags = 0;
3150 flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI;
3151 flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE;
3152 flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE;
3154 return flags;
/* ethtool .get_strings handler: fill @buf with the names of the requested
 * string set. Statistics strings are emitted in the same order (and with
 * the same filtering) as bnx2x_get_ethtool_stats() emits values.
 */
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k, start;
	char queue_name[MAX_QUEUE_NAME_LEN+1];

	switch (stringset) {
	case ETH_SS_STATS:
		k = 0;
		if (is_multi(bp)) {
			for_each_eth_queue(bp, i) {
				memset(queue_name, 0, sizeof(queue_name));
				snprintf(queue_name, sizeof(queue_name),
					 "%d", i);
				/* The q-stat format strings contain a %s that
				 * receives the queue name
				 */
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					snprintf(buf + (k + j)*ETH_GSTRING_LEN,
						ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string,
						queue_name);
				k += BNX2X_NUM_Q_STATS;
			}
		}

		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			/* Skip port stats hidden from VFs */
			if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
				continue;
			strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				   bnx2x_stats_arr[i].string);
			j++;
		}

		break;

	case ETH_SS_TEST:
		/* First 4 tests cannot be done in MF mode */
		if (!IS_MF(bp))
			start = 0;
		else
			start = 4;
		memcpy(buf, bnx2x_tests_str_arr + start,
		       ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
		break;

	case ETH_SS_PRIV_FLAGS:
		memcpy(buf, bnx2x_private_arr,
		       ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
		break;
	}
}
/* ethtool .get_ethtool_stats handler: copy per-queue and global statistics
 * into @buf, in the same order as the strings emitted by
 * bnx2x_get_strings(). Entries with size 0 are placeholders and report 0;
 * size-4 counters are widened from u32, size-8 counters are read as
 * hi/lo dword pairs.
 */
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k = 0;

	if (is_multi(bp)) {
		for_each_eth_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
	}

	hw_stats = (u32 *)&bp->eth_stats;
	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		/* Skip port stats hidden from VFs (mirrors get_strings) */
		if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
			continue;
		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[k + j] = 0;
			j++;
			continue;
		}
		offset = (hw_stats + bnx2x_stats_arr[i].offset);
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[k + j] = (u64) *offset;
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[k + j] = HILO_U64(*offset, *(offset + 1));
		j++;
	}
}
/* ethtool .set_phys_id handler: blink the port LED for physical
 * identification. Returns 1 from ETHTOOL_ID_ACTIVE to request a 1Hz
 * on/off cycle driven by the core; LED changes are done under the PHY
 * lock. Fails with -EAGAIN when the interface is down.
 */
static int bnx2x_set_phys_id(struct net_device *dev,
			     enum ethtool_phys_id_state state)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		bnx2x_acquire_phy_lock(bp);
		bnx2x_set_led(&bp->link_params, &bp->link_vars,
			      LED_MODE_ON, SPEED_1000);
		bnx2x_release_phy_lock(bp);
		break;

	case ETHTOOL_ID_OFF:
		bnx2x_acquire_phy_lock(bp);
		bnx2x_set_led(&bp->link_params, &bp->link_vars,
			      LED_MODE_FRONT_PANEL_OFF, 0);
		bnx2x_release_phy_lock(bp);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore normal (operational) LED behaviour */
		bnx2x_acquire_phy_lock(bp);
		bnx2x_set_led(&bp->link_params, &bp->link_vars,
			      LED_MODE_OPER,
			      bp->link_vars.line_speed);
		bnx2x_release_phy_lock(bp);
	}

	return 0;
}
3300 static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3302 switch (info->flow_type) {
3303 case TCP_V4_FLOW:
3304 case TCP_V6_FLOW:
3305 info->data = RXH_IP_SRC | RXH_IP_DST |
3306 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3307 break;
3308 case UDP_V4_FLOW:
3309 if (bp->rss_conf_obj.udp_rss_v4)
3310 info->data = RXH_IP_SRC | RXH_IP_DST |
3311 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3312 else
3313 info->data = RXH_IP_SRC | RXH_IP_DST;
3314 break;
3315 case UDP_V6_FLOW:
3316 if (bp->rss_conf_obj.udp_rss_v6)
3317 info->data = RXH_IP_SRC | RXH_IP_DST |
3318 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3319 else
3320 info->data = RXH_IP_SRC | RXH_IP_DST;
3321 break;
3322 case IPV4_FLOW:
3323 case IPV6_FLOW:
3324 info->data = RXH_IP_SRC | RXH_IP_DST;
3325 break;
3326 default:
3327 info->data = 0;
3328 break;
3331 return 0;
3334 static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
3335 u32 *rules __always_unused)
3337 struct bnx2x *bp = netdev_priv(dev);
3339 switch (info->cmd) {
3340 case ETHTOOL_GRXRINGS:
3341 info->data = BNX2X_NUM_ETH_QUEUES(bp);
3342 return 0;
3343 case ETHTOOL_GRXFH:
3344 return bnx2x_get_rss_flags(bp, info);
3345 default:
3346 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
3347 return -EOPNOTSUPP;
3351 static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3353 int udp_rss_requested;
3355 DP(BNX2X_MSG_ETHTOOL,
3356 "Set rss flags command parameters: flow type = %d, data = %llu\n",
3357 info->flow_type, info->data);
3359 switch (info->flow_type) {
3360 case TCP_V4_FLOW:
3361 case TCP_V6_FLOW:
3362 /* For TCP only 4-tupple hash is supported */
3363 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
3364 RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
3365 DP(BNX2X_MSG_ETHTOOL,
3366 "Command parameters not supported\n");
3367 return -EINVAL;
3369 return 0;
3371 case UDP_V4_FLOW:
3372 case UDP_V6_FLOW:
3373 /* For UDP either 2-tupple hash or 4-tupple hash is supported */
3374 if (info->data == (RXH_IP_SRC | RXH_IP_DST |
3375 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3376 udp_rss_requested = 1;
3377 else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
3378 udp_rss_requested = 0;
3379 else
3380 return -EINVAL;
3382 if (CHIP_IS_E1x(bp) && udp_rss_requested) {
3383 DP(BNX2X_MSG_ETHTOOL,
3384 "57710, 57711 boards don't support RSS according to UDP 4-tuple\n");
3385 return -EINVAL;
3388 if ((info->flow_type == UDP_V4_FLOW) &&
3389 (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
3390 bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
3391 DP(BNX2X_MSG_ETHTOOL,
3392 "rss re-configured, UDP 4-tupple %s\n",
3393 udp_rss_requested ? "enabled" : "disabled");
3394 if (bp->state == BNX2X_STATE_OPEN)
3395 return bnx2x_rss(bp, &bp->rss_conf_obj, false,
3396 true);
3397 } else if ((info->flow_type == UDP_V6_FLOW) &&
3398 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
3399 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
3400 DP(BNX2X_MSG_ETHTOOL,
3401 "rss re-configured, UDP 4-tupple %s\n",
3402 udp_rss_requested ? "enabled" : "disabled");
3403 if (bp->state == BNX2X_STATE_OPEN)
3404 return bnx2x_rss(bp, &bp->rss_conf_obj, false,
3405 true);
3407 return 0;
3409 case IPV4_FLOW:
3410 case IPV6_FLOW:
3411 /* For IP only 2-tupple hash is supported */
3412 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
3413 DP(BNX2X_MSG_ETHTOOL,
3414 "Command parameters not supported\n");
3415 return -EINVAL;
3417 return 0;
3419 case SCTP_V4_FLOW:
3420 case AH_ESP_V4_FLOW:
3421 case AH_V4_FLOW:
3422 case ESP_V4_FLOW:
3423 case SCTP_V6_FLOW:
3424 case AH_ESP_V6_FLOW:
3425 case AH_V6_FLOW:
3426 case ESP_V6_FLOW:
3427 case IP_USER_FLOW:
3428 case ETHER_FLOW:
3429 /* RSS is not supported for these protocols */
3430 if (info->data) {
3431 DP(BNX2X_MSG_ETHTOOL,
3432 "Command parameters not supported\n");
3433 return -EINVAL;
3435 return 0;
3437 default:
3438 return -EINVAL;
3442 static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3444 struct bnx2x *bp = netdev_priv(dev);
3446 switch (info->cmd) {
3447 case ETHTOOL_SRXFH:
3448 return bnx2x_set_rss_flags(bp, info);
3449 default:
3450 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
3451 return -EOPNOTSUPP;
3455 static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
3457 return T_ETH_INDIRECTION_TABLE_SIZE;
3460 static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
3461 u8 *hfunc)
3463 struct bnx2x *bp = netdev_priv(dev);
3464 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
3465 size_t i;
3467 if (hfunc)
3468 *hfunc = ETH_RSS_HASH_TOP;
3469 if (!indir)
3470 return 0;
3472 /* Get the current configuration of the RSS indirection table */
3473 bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
3476 * We can't use a memcpy() as an internal storage of an
3477 * indirection table is a u8 array while indir->ring_index
3478 * points to an array of u32.
3480 * Indirection table contains the FW Client IDs, so we need to
3481 * align the returned table to the Client ID of the leading RSS
3482 * queue.
3484 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
3485 indir[i] = ind_table[i] - bp->fp->cl_id;
3487 return 0;
3490 static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
3491 const u8 *key, const u8 hfunc)
3493 struct bnx2x *bp = netdev_priv(dev);
3494 size_t i;
3496 /* We require at least one supported parameter to be changed and no
3497 * change in any of the unsupported parameters
3499 if (key ||
3500 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
3501 return -EOPNOTSUPP;
3503 if (!indir)
3504 return 0;
3506 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3508 * The same as in bnx2x_get_rxfh: we can't use a memcpy()
3509 * as an internal storage of an indirection table is a u8 array
3510 * while indir->ring_index points to an array of u32.
3512 * Indirection table contains the FW Client IDs, so we need to
3513 * align the received table to the Client ID of the leading RSS
3514 * queue
3516 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
3519 if (bp->state == BNX2X_STATE_OPEN)
3520 return bnx2x_config_rss_eth(bp, false);
3522 return 0;
3526 * bnx2x_get_channels - gets the number of RSS queues.
3528 * @dev: net device
3529 * @channels: returns the number of max / current queues
3531 static void bnx2x_get_channels(struct net_device *dev,
3532 struct ethtool_channels *channels)
3534 struct bnx2x *bp = netdev_priv(dev);
3536 channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
3537 channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
3541 * bnx2x_change_num_queues - change the number of RSS queues.
3543 * @bp: bnx2x private structure
3545 * Re-configure interrupt mode to get the new number of MSI-X
3546 * vectors and re-add NAPI objects.
3548 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
3550 bnx2x_disable_msi(bp);
3551 bp->num_ethernet_queues = num_rss;
3552 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
3553 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
3554 bnx2x_set_int_mode(bp);
3558 * bnx2x_set_channels - sets the number of RSS queues.
3560 * @dev: net device
3561 * @channels: includes the number of queues requested
3563 static int bnx2x_set_channels(struct net_device *dev,
3564 struct ethtool_channels *channels)
3566 struct bnx2x *bp = netdev_priv(dev);
3568 DP(BNX2X_MSG_ETHTOOL,
3569 "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
3570 channels->rx_count, channels->tx_count, channels->other_count,
3571 channels->combined_count);
3573 if (pci_num_vf(bp->pdev)) {
3574 DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n");
3575 return -EPERM;
3578 /* We don't support separate rx / tx channels.
3579 * We don't allow setting 'other' channels.
3581 if (channels->rx_count || channels->tx_count || channels->other_count
3582 || (channels->combined_count == 0) ||
3583 (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
3584 DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
3585 return -EINVAL;
3588 /* Check if there was a change in the active parameters */
3589 if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
3590 DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
3591 return 0;
3594 /* Set the requested number of queues in bp context.
3595 * Note that the actual number of queues created during load may be
3596 * less than requested if memory is low.
3598 if (unlikely(!netif_running(dev))) {
3599 bnx2x_change_num_queues(bp, channels->combined_count);
3600 return 0;
3602 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
3603 bnx2x_change_num_queues(bp, channels->combined_count);
3604 return bnx2x_nic_load(bp, LOAD_NORMAL);
3607 static int bnx2x_get_ts_info(struct net_device *dev,
3608 struct ethtool_ts_info *info)
3610 struct bnx2x *bp = netdev_priv(dev);
3612 if (bp->flags & PTP_SUPPORTED) {
3613 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
3614 SOF_TIMESTAMPING_RX_SOFTWARE |
3615 SOF_TIMESTAMPING_SOFTWARE |
3616 SOF_TIMESTAMPING_TX_HARDWARE |
3617 SOF_TIMESTAMPING_RX_HARDWARE |
3618 SOF_TIMESTAMPING_RAW_HARDWARE;
3620 if (bp->ptp_clock)
3621 info->phc_index = ptp_clock_index(bp->ptp_clock);
3622 else
3623 info->phc_index = -1;
3625 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
3626 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
3627 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
3628 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
3630 info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
3632 return 0;
3635 return ethtool_op_get_ts_info(dev, info);
3638 static const struct ethtool_ops bnx2x_ethtool_ops = {
3639 .get_drvinfo = bnx2x_get_drvinfo,
3640 .get_regs_len = bnx2x_get_regs_len,
3641 .get_regs = bnx2x_get_regs,
3642 .get_dump_flag = bnx2x_get_dump_flag,
3643 .get_dump_data = bnx2x_get_dump_data,
3644 .set_dump = bnx2x_set_dump,
3645 .get_wol = bnx2x_get_wol,
3646 .set_wol = bnx2x_set_wol,
3647 .get_msglevel = bnx2x_get_msglevel,
3648 .set_msglevel = bnx2x_set_msglevel,
3649 .nway_reset = bnx2x_nway_reset,
3650 .get_link = bnx2x_get_link,
3651 .get_eeprom_len = bnx2x_get_eeprom_len,
3652 .get_eeprom = bnx2x_get_eeprom,
3653 .set_eeprom = bnx2x_set_eeprom,
3654 .get_coalesce = bnx2x_get_coalesce,
3655 .set_coalesce = bnx2x_set_coalesce,
3656 .get_ringparam = bnx2x_get_ringparam,
3657 .set_ringparam = bnx2x_set_ringparam,
3658 .get_pauseparam = bnx2x_get_pauseparam,
3659 .set_pauseparam = bnx2x_set_pauseparam,
3660 .self_test = bnx2x_self_test,
3661 .get_sset_count = bnx2x_get_sset_count,
3662 .get_priv_flags = bnx2x_get_private_flags,
3663 .get_strings = bnx2x_get_strings,
3664 .set_phys_id = bnx2x_set_phys_id,
3665 .get_ethtool_stats = bnx2x_get_ethtool_stats,
3666 .get_rxnfc = bnx2x_get_rxnfc,
3667 .set_rxnfc = bnx2x_set_rxnfc,
3668 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
3669 .get_rxfh = bnx2x_get_rxfh,
3670 .set_rxfh = bnx2x_set_rxfh,
3671 .get_channels = bnx2x_get_channels,
3672 .set_channels = bnx2x_set_channels,
3673 .get_module_info = bnx2x_get_module_info,
3674 .get_module_eeprom = bnx2x_get_module_eeprom,
3675 .get_eee = bnx2x_get_eee,
3676 .set_eee = bnx2x_set_eee,
3677 .get_ts_info = bnx2x_get_ts_info,
3678 .get_link_ksettings = bnx2x_get_link_ksettings,
3679 .set_link_ksettings = bnx2x_set_link_ksettings,
3682 static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
3683 .get_drvinfo = bnx2x_get_drvinfo,
3684 .get_msglevel = bnx2x_get_msglevel,
3685 .set_msglevel = bnx2x_set_msglevel,
3686 .get_link = bnx2x_get_link,
3687 .get_coalesce = bnx2x_get_coalesce,
3688 .get_ringparam = bnx2x_get_ringparam,
3689 .set_ringparam = bnx2x_set_ringparam,
3690 .get_sset_count = bnx2x_get_sset_count,
3691 .get_strings = bnx2x_get_strings,
3692 .get_ethtool_stats = bnx2x_get_ethtool_stats,
3693 .get_rxnfc = bnx2x_get_rxnfc,
3694 .set_rxnfc = bnx2x_set_rxnfc,
3695 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
3696 .get_rxfh = bnx2x_get_rxfh,
3697 .set_rxfh = bnx2x_set_rxfh,
3698 .get_channels = bnx2x_get_channels,
3699 .set_channels = bnx2x_set_channels,
3700 .get_link_ksettings = bnx2x_get_vf_link_ksettings,
3703 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
3705 netdev->ethtool_ops = (IS_PF(bp)) ?
3706 &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;