// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2015 Chelsio Communications.  All rights reserved.
 */

#include <linux/firmware.h>
#include <linux/mdio.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "cxgb4_cudbg.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

enum cxgb4_ethtool_tests {
	CXGB4_ETHTOOL_LB_TEST,
	CXGB4_ETHTOOL_MAX_TEST,
};

static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = {
	"Loop back test (offline)",
};

static const char * const flash_region_strings[] = {
	"All",
	"Firmware",
	"PHY Firmware",
	"Boot",
	"Boot CFG",
};

static const char stats_strings[][ETH_GSTRING_LEN] = {
	"tx_broadcast_frames ",
	"tx_multicast_frames ",
	"tx_frames_65_to_127 ",
	"tx_frames_128_to_255 ",
	"tx_frames_256_to_511 ",
	"tx_frames_512_to_1023 ",
	"tx_frames_1024_to_1518 ",
	"tx_frames_1519_to_max ",

	"rx_broadcast_frames ",
	"rx_multicast_frames ",
	"rx_frames_too_long ",
	"rx_frames_65_to_127 ",
	"rx_frames_128_to_255 ",
	"rx_frames_256_to_511 ",
	"rx_frames_512_to_1023 ",
	"rx_frames_1024_to_1518 ",
	"rx_frames_1519_to_max ",

	"rx_bg0_frames_dropped ",
	"rx_bg1_frames_dropped ",
	"rx_bg2_frames_dropped ",
	"rx_bg3_frames_dropped ",
	"rx_bg0_frames_trunc ",
	"rx_bg1_frames_trunc ",
	"rx_bg2_frames_trunc ",
	"rx_bg3_frames_trunc ",

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	"tx_tls_encrypted_packets",
	"tx_tls_encrypted_bytes ",
	"tx_tls_skip_no_sync_data",
	"tx_tls_drop_no_sync_data",
	"tx_tls_drop_bypass_req ",
#endif
};

static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
	"write_coal_success ",
};

static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
	"-------Loopback----------- ",
	"frames_128_to_255 ",
	"frames_256_to_511 ",
	"frames_512_to_1023 ",
	"frames_1024_to_1518 ",
	"frames_1519_to_max ",
	"bg0_frames_dropped ",
	"bg1_frames_dropped ",
	"bg2_frames_dropped ",
	"bg3_frames_dropped ",
};

static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
	[PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings) +
		       ARRAY_SIZE(adapter_stats_strings) +
		       ARRAY_SIZE(loopback_stats_strings);
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(cxgb4_priv_flags_strings);
	case ETH_SS_TEST:
		return ARRAY_SIZE(cxgb4_selftest_strings);
	default:
		return -EOPNOTSUPP;
	}
}

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_get_regs_len(adap);
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);
	u32 exprom_vers;

	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
	info->regdump_len = get_regs_len(dev);

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));

	if (!t4_get_exprom_version(adapter, &exprom_vers))
		snprintf(info->erom_version, sizeof(info->erom_version),
			 "%u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
			 FW_HDR_FW_VER_MINOR_G(exprom_vers),
			 FW_HDR_FW_VER_MICRO_G(exprom_vers),
			 FW_HDR_FW_VER_BUILD_G(exprom_vers));
	info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS) {
		memcpy(data, stats_strings, sizeof(stats_strings));
		data += sizeof(stats_strings);
		memcpy(data, adapter_stats_strings,
		       sizeof(adapter_stats_strings));
		data += sizeof(adapter_stats_strings);
		memcpy(data, loopback_stats_strings,
		       sizeof(loopback_stats_strings));
	} else if (stringset == ETH_SS_PRIV_FLAGS) {
		memcpy(data, cxgb4_priv_flags_strings,
		       sizeof(cxgb4_priv_flags_strings));
	} else if (stringset == ETH_SS_TEST) {
		memcpy(data, cxgb4_selftest_strings,
		       sizeof(cxgb4_selftest_strings));
	}
}

/* port stats maintained per queue of the port. They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 uso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	u64 tx_tls_encrypted_packets;
	u64 tx_tls_encrypted_bytes;
	u64 tx_tls_ctx;
	u64 tx_tls_ooo;
	u64 tx_tls_skip_no_sync_data;
	u64 tx_tls_drop_no_sync_data;
	u64 tx_tls_drop_bypass_req;
#endif
};

struct adapter_stats {
	u64 db_drop;
	u64 db_full;
	u64 db_empty;
	u64 wc_success;
};

static void collect_sge_port_stats(const struct adapter *adap,
				   const struct port_info *p,
				   struct queue_port_stats *s)
{
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	const struct ch_ktls_port_stats_debug *ktls_stats;
#endif
	struct sge_eohw_txq *eohw_tx;
	int i;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->uso += tx->uso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}

	if (adap->sge.eohw_txq) {
		eohw_tx = &adap->sge.eohw_txq[p->first_qset];
		for (i = 0; i < p->nqsets; i++, eohw_tx++) {
			s->tso += eohw_tx->tso;
			s->uso += eohw_tx->uso;
			s->tx_csum += eohw_tx->tx_cso;
			s->vlan_ins += eohw_tx->vlan_ins;
		}
	}
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id];
	s->tx_tls_encrypted_packets =
		atomic64_read(&ktls_stats->ktls_tx_encrypted_packets);
	s->tx_tls_encrypted_bytes =
		atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes);
	s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx);
	s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);
	s->tx_tls_skip_no_sync_data =
		atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data);
	s->tx_tls_drop_no_sync_data =
		atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data);
	s->tx_tls_drop_bypass_req =
		atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req);
#endif
}

static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
{
	u32 val1, val2;

	memset(s, 0, sizeof(*s));

	s->db_drop = adap->db_stats.db_drop;
	s->db_full = adap->db_stats.db_full;
	s->db_empty = adap->db_stats.db_empty;

	if (!is_t4(adap->params.chip)) {
		int v;

		v = t4_read_reg(adap, SGE_STAT_CFG_A);
		if (STATSOURCE_T5_G(v) == 7) {
			val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
			val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
			s->wc_success = val1 - val2;
		}
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct lb_port_stats s;
	int i;
	u64 *p0;

	t4_get_port_stats_offset(adapter, pi->tx_chan,
				 (struct port_stats *)data,
				 &pi->stats_base);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	collect_adapter_stats(adapter, (struct adapter_stats *)data);
	data += sizeof(struct adapter_stats) / sizeof(u64);

	*data++ = (u64)pi->port_id;
	memset(&s, 0, sizeof(s));
	t4_get_lb_stats(adapter, pi->port_id, &s);

	p0 = &s.octets;
	for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
		*data++ = (unsigned long long)*p0++;
}

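/* Usage note (illustrative, not part of the original driver source): the
 * counters exposed through get_sset_count()/get_strings()/get_stats() above
 * are what userspace sees when running, for example:
 *
 *	ethtool -S ethX		("ethX" is a placeholder interface name)
 *
 * The values are laid out as the port_stats block, then the per-queue stats,
 * then the adapter-wide and loopback stats, in the same order as
 * stats_strings, adapter_stats_strings and loopback_stats_strings above.
 */
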
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *adap = netdev2adap(dev);
	size_t buf_size;

	buf_size = t4_get_regs_len(adap);
	regs->version = mk_adap_vers(adap);
	t4_get_regs(adap, buf, buf_size);
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
}

/**
 *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
 *	@port_type: Firmware Port Type
 *	@mod_type: Firmware Module Type
 *
 *	Translate Firmware Port/Module type to Ethtool Port Type.
 */
static int from_fw_port_mod_type(enum fw_port_type port_type,
				 enum fw_port_module_type mod_type)
{
	if (port_type == FW_PORT_TYPE_BT_SGMII ||
	    port_type == FW_PORT_TYPE_BT_XFI ||
	    port_type == FW_PORT_TYPE_BT_XAUI) {
		return PORT_TP;
	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
		return PORT_FIBRE;
	} else if (port_type == FW_PORT_TYPE_SFP ||
		   port_type == FW_PORT_TYPE_QSFP_10G ||
		   port_type == FW_PORT_TYPE_QSA ||
		   port_type == FW_PORT_TYPE_QSFP ||
		   port_type == FW_PORT_TYPE_CR4_QSFP ||
		   port_type == FW_PORT_TYPE_CR_QSFP ||
		   port_type == FW_PORT_TYPE_CR2_QSFP ||
		   port_type == FW_PORT_TYPE_SFP28) {
		if (mod_type == FW_PORT_MOD_TYPE_LR ||
		    mod_type == FW_PORT_MOD_TYPE_SR ||
		    mod_type == FW_PORT_MOD_TYPE_ER ||
		    mod_type == FW_PORT_MOD_TYPE_LRM)
			return PORT_FIBRE;
		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			return PORT_DA;
		else
			return PORT_OTHER;
	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
		   port_type == FW_PORT_TYPE_KR_SFP28 ||
		   port_type == FW_PORT_TYPE_KR_XLAUI) {
		return PORT_NONE;
	}

	return PORT_OTHER;
}

/**
 *	speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities
 *	@speed: speed in Kb/s
 *
 *	Translates a specific Port Speed into a Firmware Port Capabilities
 *	value.
 */
static unsigned int speed_to_fw_caps(int speed)
{
	if (speed == 100)
		return FW_PORT_CAP32_SPEED_100M;
	if (speed == 1000)
		return FW_PORT_CAP32_SPEED_1G;
	if (speed == 10000)
		return FW_PORT_CAP32_SPEED_10G;
	if (speed == 25000)
		return FW_PORT_CAP32_SPEED_25G;
	if (speed == 40000)
		return FW_PORT_CAP32_SPEED_40G;
	if (speed == 50000)
		return FW_PORT_CAP32_SPEED_50G;
	if (speed == 100000)
		return FW_PORT_CAP32_SPEED_100G;
	if (speed == 200000)
		return FW_PORT_CAP32_SPEED_200G;
	if (speed == 400000)
		return FW_PORT_CAP32_SPEED_400G;
	return 0;
}

/**
 *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
 *	@port_type: Firmware Port Type
 *	@fw_caps: Firmware Port Capabilities
 *	@link_mode_mask: ethtool Link Mode Mask
 *
 *	Translate a Firmware Port Capabilities specification to an ethtool
 *	Link Mode Mask.
 */
static void fw_caps_to_lmm(enum fw_port_type port_type,
			   fw_port_cap32_t fw_caps,
			   unsigned long *link_mode_mask)
{
	#define SET_LMM(__lmm_name) \
		do { \
			__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
				  link_mode_mask); \
		} while (0)

	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
		do { \
			if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
				SET_LMM(__lmm_name); \
		} while (0)

	switch (port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		SET_LMM(TP);
		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_KR:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		break;

	case FW_PORT_TYPE_BP_AP:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		break;

	case FW_PORT_TYPE_BP4_AP:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_QSFP:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
		break;

	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_SFP28:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
		break;

	case FW_PORT_TYPE_KR_SFP28:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
		break;

	case FW_PORT_TYPE_KR_XLAUI:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
		break;

	case FW_PORT_TYPE_CR2_QSFP:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
		break;

	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_CR4_QSFP:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
		FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
		FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
		break;

	default:
		break;
	}

	if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
		FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
		FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
	} else {
		SET_LMM(FEC_NONE);
	}

	FW_CAPS_TO_LMM(ANEG, Autoneg);
	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);

	#undef FW_CAPS_TO_LMM
	#undef SET_LMM
}

/**
 *	lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
 *	Port Capabilities
 *	@link_mode_mask: ethtool Link Mode Mask
 *
 *	Translate ethtool Link Mode Mask into a Firmware Port capabilities
 *	value.
 */
static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
{
	unsigned int fw_caps = 0;

	#define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \
		do { \
			if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
				     link_mode_mask)) \
				fw_caps |= FW_PORT_CAP32_ ## __fw_name; \
		} while (0)

	LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M);
	LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G);
	LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G);
	LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G);
	LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G);
	LMM_TO_FW_CAPS(50000baseCR2_Full, SPEED_50G);
	LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G);

	#undef LMM_TO_FW_CAPS

	return fw_caps;
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *link_ksettings)
{
	struct port_info *pi = netdev_priv(dev);
	struct ethtool_link_settings *base = &link_ksettings->base;

	/* For the nonce, the Firmware doesn't send up Port State changes
	 * when the Virtual Interface attached to the Port is down.  So
	 * if it's down, let's grab any changes.
	 */
	if (!netif_running(dev))
		(void)t4_update_port_info(pi);

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);

	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);

	if (pi->mdio_addr >= 0) {
		base->phy_address = pi->mdio_addr;
		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
				      ? ETH_MDIO_SUPPORTS_C22
				      : ETH_MDIO_SUPPORTS_C45);
	} else {
		base->phy_address = 255;
		base->mdio_support = 0;
	}

	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
		       link_ksettings->link_modes.supported);
	fw_caps_to_lmm(pi->port_type,
		       t4_link_acaps(pi->adapter,
				     pi->lport,
				     &pi->link_cfg),
		       link_ksettings->link_modes.advertising);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
		       link_ksettings->link_modes.lp_advertising);

	base->speed = (netif_carrier_ok(dev)
		       ? pi->link_cfg.speed
		       : SPEED_UNKNOWN);
	base->duplex = DUPLEX_FULL;

	base->autoneg = pi->link_cfg.autoneg;
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
	if (pi->link_cfg.autoneg)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	return 0;
}

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *link_ksettings)
{
	struct port_info *pi = netdev_priv(dev);
	struct link_config *lc = &pi->link_cfg;
	const struct ethtool_link_settings *base = &link_ksettings->base;
	struct link_config old_lc;
	unsigned int fw_caps;
	int ret = 0;

	/* only full-duplex supported */
	if (base->duplex != DUPLEX_FULL)
		return -EINVAL;

	old_lc = *lc;
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) ||
	    base->autoneg == AUTONEG_DISABLE) {
		fw_caps = speed_to_fw_caps(base->speed);

		/* Speed must be supported by Physical Port Capabilities. */
		if (!(lc->pcaps & fw_caps))
			return -EINVAL;

		lc->speed_caps = fw_caps;
		lc->acaps = fw_caps;
	} else {
		fw_caps =
			lmm_to_fw_caps(link_ksettings->link_modes.advertising);
		if (!(lc->pcaps & fw_caps))
			return -EINVAL;
		lc->speed_caps = 0;
		lc->acaps = fw_caps | FW_PORT_CAP32_ANEG;
	}
	lc->autoneg = base->autoneg;

	/* If the firmware rejects the Link Configuration request, back out
	 * the changes and report the error.
	 */
	ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
	if (ret)
		*lc = old_lc;

	return ret;
}

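/* Example (illustrative, not part of the original source): forcing a fixed,
 * supported speed with autonegotiation disabled goes through
 * set_link_ksettings() and speed_to_fw_caps() above, e.g.:
 *
 *	ethtool -s ethX speed 25000 duplex full autoneg off
 *
 * "ethX" is a placeholder interface name; the requested speed must be present
 * in the port's Physical Port Capabilities or the call returns -EINVAL.
 */
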
/* Translate the Firmware FEC value into the ethtool value. */
static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
{
	unsigned int eth_fec = 0;

	if (fw_fec & FW_PORT_CAP32_FEC_RS)
		eth_fec |= ETHTOOL_FEC_RS;
	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
		eth_fec |= ETHTOOL_FEC_BASER;

	/* if nothing is set, then FEC is off */
	if (!eth_fec)
		eth_fec = ETHTOOL_FEC_OFF;

	return eth_fec;
}

/* Translate Common Code FEC value into ethtool value. */
static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
{
	unsigned int eth_fec = 0;

	if (cc_fec & FEC_AUTO)
		eth_fec |= ETHTOOL_FEC_AUTO;
	if (cc_fec & FEC_RS)
		eth_fec |= ETHTOOL_FEC_RS;
	if (cc_fec & FEC_BASER_RS)
		eth_fec |= ETHTOOL_FEC_BASER;

	/* if nothing is set, then FEC is off */
	if (!eth_fec)
		eth_fec = ETHTOOL_FEC_OFF;

	return eth_fec;
}

/* Translate ethtool FEC value into Common Code value. */
static inline unsigned int eth_to_cc_fec(unsigned int eth_fec)
{
	unsigned int cc_fec = 0;

	if (eth_fec & ETHTOOL_FEC_OFF)
		return cc_fec;

	if (eth_fec & ETHTOOL_FEC_AUTO)
		cc_fec |= FEC_AUTO;
	if (eth_fec & ETHTOOL_FEC_RS)
		cc_fec |= FEC_RS;
	if (eth_fec & ETHTOOL_FEC_BASER)
		cc_fec |= FEC_BASER_RS;

	return cc_fec;
}

static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct link_config *lc = &pi->link_cfg;

	/* Translate the Firmware FEC Support into the ethtool value.  We
	 * always support IEEE 802.3 "automatic" selection of Link FEC type if
	 * any FEC is supported.
	 */
	fec->fec = fwcap_to_eth_fec(lc->pcaps);
	if (fec->fec != ETHTOOL_FEC_OFF)
		fec->fec |= ETHTOOL_FEC_AUTO;

	/* Translate the current internal FEC parameters into the
	 * ethtool values.
	 */
	fec->active_fec = cc_to_eth_fec(lc->fec);

	return 0;
}

static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
{
	struct port_info *pi = netdev_priv(dev);
	struct link_config *lc = &pi->link_cfg;
	struct link_config old_lc;
	int ret;

	/* Save old Link Configuration in case the L1 Configure below
	 * fails.
	 */
	old_lc = *lc;

	/* Try to perform the L1 Configure and return the result of that
	 * effort.  If it fails, revert the attempted change.
	 */
	lc->requested_fec = eth_to_cc_fec(fec->fec);
	ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox,
			    pi->tx_chan, lc);
	if (ret)
		*lc = old_lc;
	return ret;
}

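/* Example (illustrative): FEC selection maps onto get_fecparam()/set_fecparam()
 * through the standard ethtool FEC interface, e.g.:
 *
 *	ethtool --show-fec ethX
 *	ethtool --set-fec ethX encoding rs
 *
 * "ethX" is a placeholder interface name.
 */
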
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->pcaps & FW_PORT_CAP32_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
				     lc);
	return 0;
}

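/* Example (illustrative): pause-frame configuration is driven through
 * get_pauseparam()/set_pauseparam() above, e.g.:
 *
 *	ethtool -a ethX
 *	ethtool -A ethX autoneg on rx on tx on
 *
 * "ethX" is a placeholder interface name.
 */
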
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & CXGB4_FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}

/**
 *	set_rx_intr_params - set a net device's RX interrupt holdoff parameters
 *	@dev: the network device
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Set the RX interrupt hold-off parameters for a network device.
 */
static int set_rx_intr_params(struct net_device *dev,
			      unsigned int us, unsigned int cnt)
{
	int i, err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++) {
		err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
		if (err)
			return err;
	}
	return 0;
}

static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
{
	int i;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++)
		q->rspq.adaptive_rx = adaptive_rx;

	return 0;
}

static int get_adaptive_rx_setting(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	return q->rspq.adaptive_rx;
}

/* Return the current global Adapter SGE Doorbell Queue Timer Tick for all
 * Ethernet TX Queues.
 */
static int get_dbqtimer_tick(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
		return 0;

	return adap->sge.dbqtimer_tick;
}

/* Return the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
 * associated with a Network Device.
 */
static int get_dbqtimer(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;

	txq = &adap->sge.ethtxq[pi->first_qset];

	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
		return 0;

	/* all of the TX Queues use the same Timer Index */
	return adap->sge.dbqtimer_val[txq->dbqtimerix];
}

/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
 * Queues.  This is the fundamental "Tick" that sets the scale of values which
 * can be used.  Individual Ethernet TX Queues index into a relatively small
 * array of Tick Multipliers.  Changing the base Tick will thus change all of
 * the resulting Timer Values associated with those multipliers for all
 * Ethernet TX Queues.
 */
static int set_dbqtimer_tick(struct net_device *dev, int usecs)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	u32 param, val;
	int ret;

	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
		return 0;

	/* return early if it's the same Timer Tick we're already using */
	if (s->dbqtimer_tick == usecs)
		return 0;

	/* attempt to set the new Timer Tick value */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
	val = usecs;
	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret)
		return ret;
	s->dbqtimer_tick = usecs;

	/* if successful, reread resulting dependent Timer values */
	ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val),
				    s->dbqtimer_val);
	return ret;
}

/* Set the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
 * associated with a Network Device.  There is a relatively small array of
 * possible Timer Values so we need to pick the closest value available.
 */
static int set_dbqtimer(struct net_device *dev, int usecs)
{
	int qix, timerix, min_timerix, delta, min_delta;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	struct sge_eth_txq *txq;
	u32 param, val;
	int ret;

	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
		return 0;

	/* Find the SGE Doorbell Timer Value that's closest to the requested
	 * value.
	 */
	min_delta = INT_MAX;
	min_timerix = 0;
	for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) {
		delta = s->dbqtimer_val[timerix] - usecs;
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			min_timerix = timerix;
		}
	}

	/* Return early if it's the same Timer Index we're already using.
	 * We use the same Timer Index for all of the TX Queues for an
	 * interface so it's only necessary to check the first one.
	 */
	txq = &s->ethtxq[pi->first_qset];
	if (txq->dbqtimerix == min_timerix)
		return 0;

	for (qix = 0; qix < pi->nqsets; qix++, txq++) {
		if (adap->flags & CXGB4_FULL_INIT_DONE) {
			param =
			 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
			  FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
			val = min_timerix;
			ret = t4_set_params(adap, adap->mbox, adap->pf, 0,
					    1, &param, &val);
			if (ret)
				return ret;
		}
		txq->dbqtimerix = min_timerix;
	}
	return 0;
}

/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
 * Queues and the Timer Value for the Ethernet TX Queues associated with a
 * Network Device.  Since changing the global Tick changes all of the
 * available Timer Values, we need to do this first before selecting the
 * resulting closest Timer Value.  Moreover, since the Tick is global,
 * changing it affects the Timer Values for all Network Devices on the
 * adapter.  So, before changing the Tick, we grab all of the current Timer
 * Values for other Network Devices on this Adapter and then attempt to select
 * new Timer Values which are close to the old values ...
 */
static int set_dbqtimer_tickval(struct net_device *dev,
				int tick_usecs, int timer_usecs)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int timer[MAX_NPORTS];
	unsigned int port;
	int ret;

	/* Grab the other adapter Network Interface current timers and fill in
	 * the new one for this Network Interface.
	 */
	for_each_port(adap, port)
		if (port == pi->port_id)
			timer[port] = timer_usecs;
		else
			timer[port] = get_dbqtimer(adap->port[port]);

	/* Change the global Tick first ... */
	ret = set_dbqtimer_tick(dev, tick_usecs);
	if (ret)
		return ret;

	/* ... and then set all of the Network Interface Timer Values ... */
	for_each_port(adap, port) {
		ret = set_dbqtimer(adap->port[port], timer[port]);
		if (ret)
			return ret;
	}

	return 0;
}

static int set_coalesce(struct net_device *dev,
			struct ethtool_coalesce *coalesce)
{
	int ret;

	set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce);

	ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs,
				 coalesce->rx_max_coalesced_frames);
	if (ret)
		return ret;

	return set_dbqtimer_tickval(dev,
				    coalesce->tx_coalesce_usecs_irq,
				    coalesce->tx_coalesce_usecs);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
	c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev);
	c->tx_coalesce_usecs = get_dbqtimer(dev);

	return 0;
}

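/* Example (illustrative): the coalescing knobs above map onto the standard
 * ethtool coalesce parameters.  rx-usecs/rx-frames program the response-queue
 * hold-off, adaptive-rx toggles adaptive interrupt moderation, tx-usecs
 * selects the SGE Doorbell Queue Timer value and tx-usecs-irq the global
 * Timer Tick:
 *
 *	ethtool -C ethX rx-usecs 50 rx-frames 32 adaptive-rx on
 *	ethtool -C ethX tx-usecs-irq 1 tx-usecs 100
 *
 * "ethX" and the numeric values are placeholders.
 */
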
/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);
	u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kvfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/* RMW possibly needed for first or last words.
		 */
		buf = kvzalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else {
		buf = data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kvfree(buf);
	return err;
}

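/* Example (illustrative): the EEPROM accessors above are reached via the
 * ethtool EEPROM commands; writes require the driver's magic value
 * (EEPROM_MAGIC above):
 *
 *	ethtool -e ethX
 *	ethtool -E ethX magic 0x38E2F10C offset 0x400 value 0xab
 *
 * "ethX", the offset and the value are placeholders for illustration only.
 */
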
static int cxgb4_ethtool_flash_bootcfg(struct net_device *netdev,
				       const u8 *data, u32 size)
{
	struct adapter *adap = netdev2adap(netdev);
	int ret;

	ret = t4_load_bootcfg(adap, data, size);
	if (ret)
		dev_err(adap->pdev_dev, "Failed to load boot cfg image\n");

	return ret;
}

static int cxgb4_ethtool_flash_boot(struct net_device *netdev,
				    const u8 *bdata, u32 size)
{
	struct adapter *adap = netdev2adap(netdev);
	unsigned int offset;
	u8 *data;
	int ret;

	data = kmemdup(bdata, size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	offset = OFFSET_G(t4_read_reg(adap, PF_REG(0, PCIE_PF_EXPROM_OFST_A)));

	ret = t4_load_boot(adap, data, offset, size);
	if (ret)
		dev_err(adap->pdev_dev, "Failed to load boot image\n");

	kfree(data);
	return ret;
}

#define CXGB4_PHY_SIG 0x130000ea

static int cxgb4_validate_phy_image(const u8 *data, u32 *size)
{
	struct cxgb4_fw_data *header;

	header = (struct cxgb4_fw_data *)data;
	if (be32_to_cpu(header->signature) != CXGB4_PHY_SIG)
		return -EINVAL;

	return 0;
}

static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
				   const u8 *data, u32 size)
{
	struct adapter *adap = netdev2adap(netdev);
	int ret;

	ret = cxgb4_validate_phy_image(data, NULL);
	if (ret) {
		dev_err(adap->pdev_dev, "PHY signature mismatch\n");
		return ret;
	}

	spin_lock_bh(&adap->win0_lock);
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
	spin_unlock_bh(&adap->win0_lock);
	if (ret)
		dev_err(adap->pdev_dev, "Failed to load PHY FW\n");

	return ret;
}

static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
				  const u8 *data, u32 size)
{
	struct adapter *adap = netdev2adap(netdev);
	unsigned int mbox = PCIE_FW_MASTER_M + 1;
	int ret;

	/* If the adapter has been fully initialized then we'll go ahead and
	 * try to get the firmware's cooperation in upgrading to the new
	 * firmware image otherwise we'll try to do the entire job from the
	 * host ... and we always "force" the operation in this path.
	 */
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		mbox = adap->mbox;

	ret = t4_fw_upgrade(adap, mbox, data, size, 1);
	if (ret)
		dev_err(adap->pdev_dev,
			"Failed to flash firmware\n");

	return ret;
}

static int cxgb4_ethtool_flash_region(struct net_device *netdev,
				      const u8 *data, u32 size, u32 region)
{
	struct adapter *adap = netdev2adap(netdev);
	int ret;

	switch (region) {
	case CXGB4_ETHTOOL_FLASH_FW:
		ret = cxgb4_ethtool_flash_fw(netdev, data, size);
		break;
	case CXGB4_ETHTOOL_FLASH_PHY:
		ret = cxgb4_ethtool_flash_phy(netdev, data, size);
		break;
	case CXGB4_ETHTOOL_FLASH_BOOT:
		ret = cxgb4_ethtool_flash_boot(netdev, data, size);
		break;
	case CXGB4_ETHTOOL_FLASH_BOOTCFG:
		ret = cxgb4_ethtool_flash_bootcfg(netdev, data, size);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	if (!ret)
		dev_info(adap->pdev_dev,
			 "loading %s successful, reload cxgb4 driver\n",
			 flash_region_strings[region]);
	return ret;
}

#define CXGB4_FW_SIG 0x4368656c
#define CXGB4_FW_SIG_OFFSET 0x160

static int cxgb4_validate_fw_image(const u8 *data, u32 *size)
{
	struct cxgb4_fw_data *header;

	header = (struct cxgb4_fw_data *)&data[CXGB4_FW_SIG_OFFSET];
	if (be32_to_cpu(header->signature) != CXGB4_FW_SIG)
		return -EINVAL;

	if (size)
		*size = be16_to_cpu(((struct fw_hdr *)data)->len512) * 512;

	return 0;
}

static int cxgb4_validate_bootcfg_image(const u8 *data, u32 *size)
{
	struct cxgb4_bootcfg_data *header;

	header = (struct cxgb4_bootcfg_data *)data;
	if (le16_to_cpu(header->signature) != BOOT_CFG_SIG)
		return -EINVAL;

	return 0;
}

static int cxgb4_validate_boot_image(const u8 *data, u32 *size)
{
	struct cxgb4_pci_exp_rom_header *exp_header;
	struct cxgb4_pcir_data *pcir_header;
	struct legacy_pci_rom_hdr *header;
	const u8 *cur_header = data;
	u16 pcir_offset;

	exp_header = (struct cxgb4_pci_exp_rom_header *)data;

	if (le16_to_cpu(exp_header->signature) != BOOT_SIGNATURE)
		return -EINVAL;

	if (size) {
		do {
			header = (struct legacy_pci_rom_hdr *)cur_header;
			pcir_offset = le16_to_cpu(header->pcir_offset);
			pcir_header = (struct cxgb4_pcir_data *)(cur_header +
				      pcir_offset);

			*size += header->size512 * 512;
			cur_header += header->size512 * 512;
		} while (!(pcir_header->indicator & CXGB4_HDR_INDI));
	}

	return 0;
}

static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size)
{
	if (!cxgb4_validate_fw_image(data, size))
		return CXGB4_ETHTOOL_FLASH_FW;
	if (!cxgb4_validate_boot_image(data, size))
		return CXGB4_ETHTOOL_FLASH_BOOT;
	if (!cxgb4_validate_phy_image(data, size))
		return CXGB4_ETHTOOL_FLASH_PHY;
	if (!cxgb4_validate_bootcfg_image(data, size))
		return CXGB4_ETHTOOL_FLASH_BOOTCFG;

	return -EOPNOTSUPP;
}

static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	struct adapter *adap = netdev2adap(netdev);
	const struct firmware *fw;
	unsigned int master;
	u8 master_vld = 0;
	const u8 *fw_data;
	size_t fw_size;
	u32 size = 0;
	u32 pcie_fw;
	int region;
	int ret;

	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
	master = PCIE_FW_MASTER_G(pcie_fw);
	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
		master_vld = 1;
	/* if csiostor is the master return */
	if (master_vld && (master != adap->pf)) {
		dev_warn(adap->pdev_dev,
			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
		return -EOPNOTSUPP;
	}

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	fw_data = fw->data;
	fw_size = fw->size;
	if (ef->region == ETHTOOL_FLASH_ALL_REGIONS) {
		while (fw_size > 0) {
			size = 0;
			region = cxgb4_ethtool_get_flash_region(fw_data, &size);
			if (region < 0 || !size) {
				ret = -EINVAL;
				goto out_free_fw;
			}

			ret = cxgb4_ethtool_flash_region(netdev, fw_data, size,
							 region);
			if (ret)
				goto out_free_fw;

			fw_data += size;
			fw_size -= size;
		}
	} else {
		ret = cxgb4_ethtool_flash_region(netdev, fw_data, fw_size,
						 ef->region);
	}

out_free_fw:
	release_firmware(fw);
	return ret;
}

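/* Example (illustrative): firmware flashing goes through set_flash().  With
 * the default "all regions" selection the image type (FW, PHY FW, boot, boot
 * config) is auto-detected by cxgb4_ethtool_get_flash_region():
 *
 *	ethtool -f ethX t4fw.bin
 *
 * "ethX" and "t4fw.bin" are placeholders; the driver must be the firmware
 * MASTER function for the flash to be accepted.
 */
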
static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				   SOF_TIMESTAMPING_RX_SOFTWARE |
				   SOF_TIMESTAMPING_SOFTWARE;

	ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
				    SOF_TIMESTAMPING_TX_HARDWARE |
				    SOF_TIMESTAMPING_RAW_HARDWARE;

	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			    (1 << HWTSTAMP_TX_ON);

	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);

	if (adapter->ptp_clock)
		ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		ts_info->phc_index = -1;

	return 0;
}

static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!p)
		return 0;
	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
			 const u8 hfunc)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!p)
		return 0;

	/* Interface must be brought up at least once */
	if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
		for (i = 0; i < pi->rss_size; i++)
			pi->rss[i] = p[i];

		return cxgb4_write_rss(pi, pi->rss);
	}

	return -EPERM;
}

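/* Example (illustrative): the RSS indirection table handlers above are
 * exercised with ethtool's RXFH commands:
 *
 *	ethtool -x ethX
 *	ethtool -X ethX equal 4
 *
 * "ethX" is a placeholder; writes only take effect once the interface has
 * been fully initialised (CXGB4_FULL_INIT_DONE).
 */
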
static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
						   u32 ftid)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;

	if (ftid < t->nhpftids)
		f = &adap->tids.hpftid_tab[ftid];
	else if (ftid < t->nftids)
		f = &adap->tids.ftid_tab[ftid - t->nhpftids];
	else
		f = lookup_tid(&adap->tids, ftid);

	return f;
}

static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
				   struct ch_filter_specification *dfs)
{
	switch (dfs->val.proto) {
	case IPPROTO_TCP:
		if (dfs->type)
			fs->flow_type = TCP_V6_FLOW;
		else
			fs->flow_type = TCP_V4_FLOW;
		break;
	case IPPROTO_UDP:
		if (dfs->type)
			fs->flow_type = UDP_V6_FLOW;
		else
			fs->flow_type = UDP_V4_FLOW;
		break;
	}

	if (dfs->type) {
		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport);
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport);
		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport);
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport);
		memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0],
		       sizeof(fs->h_u.tcp_ip6_spec.ip6src));
		memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0],
		       sizeof(fs->m_u.tcp_ip6_spec.ip6src));
		memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0],
		       sizeof(fs->h_u.tcp_ip6_spec.ip6dst));
		memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0],
		       sizeof(fs->m_u.tcp_ip6_spec.ip6dst));
		fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos;
		fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos;
	} else {
		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport);
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport);
		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport);
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport);
		memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0],
		       sizeof(fs->h_u.tcp_ip4_spec.ip4src));
		memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0],
		       sizeof(fs->m_u.tcp_ip4_spec.ip4src));
		memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0],
		       sizeof(fs->h_u.tcp_ip4_spec.ip4dst));
		memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0],
		       sizeof(fs->m_u.tcp_ip4_spec.ip4dst));
		fs->h_u.tcp_ip4_spec.tos = dfs->val.tos;
		fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos;
	}
	fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan);
	fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan);
	fs->flow_type |= FLOW_EXT;

	if (dfs->action == FILTER_DROP)
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fs->ring_cookie = dfs->iq;
}

static int cxgb4_ntuple_get_filter(struct net_device *dev,
				   struct ethtool_rxnfc *cmd,
				   unsigned int loc)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = netdev2adap(dev);
	struct filter_entry *f;
	u32 ftid;

	if (!(adap->flags & CXGB4_FULL_INIT_DONE))
		return -EAGAIN;

	/* Check for maximum filter range */
	if (!adap->ethtool_filters)
		return -EOPNOTSUPP;

	if (loc >= adap->ethtool_filters->nentries)
		return -ERANGE;

	if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap))
		return -ENOENT;

	ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc];

	/* Fetch filter_entry */
	f = cxgb4_get_filter_entry(adap, ftid);

	cxgb4_fill_filter_rule(&cmd->fs, &f->fs);

	return 0;
}

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = netdev2adap(dev);
	unsigned int count = 0, index = 0;
	int ret = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt =
			adap->ethtool_filters->port[pi->port_id].in_use;
		return 0;
	case ETHTOOL_GRXCLSRULE:
		return cxgb4_ntuple_get_filter(dev, info, info->fs.location);
	case ETHTOOL_GRXCLSRLALL:
		info->data = adap->ethtool_filters->nentries;
		while (count < info->rule_cnt) {
			ret = cxgb4_ntuple_get_filter(dev, info, index);
			if (!ret)
				rules[count++] = index;
			index++;
		}
		return 0;
	}

	return -EOPNOTSUPP;
}

static int cxgb4_ntuple_del_filter(struct net_device *dev,
				   struct ethtool_rxnfc *cmd)
{
	struct cxgb4_ethtool_filter_info *filter_info;
	struct adapter *adapter = netdev2adap(dev);
	struct port_info *pi = netdev_priv(dev);
	struct filter_entry *f;
	u32 filter_id;
	int ret;

	if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
		return -EAGAIN;  /* can still change nfilters */

	if (!adapter->ethtool_filters)
		return -EOPNOTSUPP;

	if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
		dev_err(adapter->pdev_dev,
			"Location must be < %u",
			adapter->ethtool_filters->nentries);
		return -ERANGE;
	}

	filter_info = &adapter->ethtool_filters->port[pi->port_id];

	if (!test_bit(cmd->fs.location, filter_info->bmap))
		return -ENOENT;

	filter_id = filter_info->loc_array[cmd->fs.location];
	f = cxgb4_get_filter_entry(adapter, filter_id);

	ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
	if (ret)
		goto err;

	clear_bit(cmd->fs.location, filter_info->bmap);
	filter_info->in_use--;

err:
	return ret;
}

/* Add Ethtool n-tuple filters. */
static int cxgb4_ntuple_set_filter(struct net_device *netdev,
				   struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec_input input = {};
	struct cxgb4_ethtool_filter_info *filter_info;
	struct adapter *adapter = netdev2adap(netdev);
	struct port_info *pi = netdev_priv(netdev);
	struct ch_filter_specification fs;
	struct ethtool_rx_flow_rule *flow;
	u32 tid = 0;
	int ret;

	if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
		return -EAGAIN;  /* can still change nfilters */

	if (!adapter->ethtool_filters)
		return -EOPNOTSUPP;

	if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
		dev_err(adapter->pdev_dev,
			"Location must be < %u",
			adapter->ethtool_filters->nentries);
		return -ERANGE;
	}

	if (test_bit(cmd->fs.location,
		     adapter->ethtool_filters->port[pi->port_id].bmap))
		return -EEXIST;

	memset(&fs, 0, sizeof(fs));

	input.fs = &cmd->fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto exit;
	}

	ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location,
				      NULL, &fs, &tid);
	if (ret)
		goto free;

	filter_info = &adapter->ethtool_filters->port[pi->port_id];

	filter_info->loc_array[cmd->fs.location] = tid;
	set_bit(cmd->fs.location, filter_info->bmap);
	filter_info->in_use++;

free:
	ethtool_rx_flow_rule_destroy(flow);
exit:
	return ret;
}

static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = cxgb4_ntuple_set_filter(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = cxgb4_ntuple_del_filter(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

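/* Example (illustrative): the n-tuple filter handlers above implement
 * ETHTOOL_SRXCLSRLINS/ETHTOOL_SRXCLSRLDEL, e.g.:
 *
 *	ethtool -N ethX flow-type tcp4 dst-port 80 action 2 loc 0
 *	ethtool -N ethX delete 0
 *	ethtool -n ethX
 *
 * "ethX" and the rule parameters are placeholders for illustration only.
 */
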
static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
{
	struct adapter *adapter = netdev2adap(dev);
	u32 len = 0;

	len = sizeof(struct cudbg_hdr) +
	      sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
	len += cxgb4_get_dump_length(adapter, eth_dump->flag);

	adapter->eth_dump.flag = eth_dump->flag;
	adapter->eth_dump.len = len;
	return 0;
}

static int get_dump_flag(struct net_device *dev, struct ethtool_dump *eth_dump)
{
	struct adapter *adapter = netdev2adap(dev);

	eth_dump->flag = adapter->eth_dump.flag;
	eth_dump->len = adapter->eth_dump.len;
	eth_dump->version = adapter->eth_dump.version;
	return 0;
}

static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
			 void *buf)
{
	struct adapter *adapter = netdev2adap(dev);
	u32 len = 0;
	int ret = 0;

	if (adapter->eth_dump.flag == CXGB4_ETH_DUMP_NONE)
		return -ENOENT;

	len = sizeof(struct cudbg_hdr) +
	      sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
	len += cxgb4_get_dump_length(adapter, adapter->eth_dump.flag);
	if (eth_dump->len < len)
		return -ENOMEM;

	ret = cxgb4_cudbg_collect(adapter, buf, &len, adapter->eth_dump.flag);
	if (ret)
		return ret;

	eth_dump->flag = adapter->eth_dump.flag;
	eth_dump->len = len;
	eth_dump->version = adapter->eth_dump.version;
	return 0;
}

static int cxgb4_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct port_info *pi = netdev_priv(dev);
	u8 sff8472_comp, sff_diag_type, sff_rev;
	struct adapter *adapter = pi->adapter;
	int ret;

	if (!t4_is_inserted_mod_type(pi->mod_type))
		return -EINVAL;

	switch (pi->port_type) {
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSA:
	case FW_PORT_TYPE_SFP28:
		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
				I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
				SFF_8472_COMP_LEN, &sff8472_comp);
		if (ret)
			return ret;
		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
				I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
				SFP_DIAG_TYPE_LEN, &sff_diag_type);
		if (ret)
			return ret;

		if (!sff8472_comp || (sff_diag_type & 4)) {
			modinfo->type = ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		}
		break;

	case FW_PORT_TYPE_QSFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_CR2_QSFP:
	case FW_PORT_TYPE_CR4_QSFP:
		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
				I2C_DEV_ADDR_A0, SFF_REV_ADDR,
				SFF_REV_LEN, &sff_rev);
		/* For QSFP type ports, revision value >= 3
		 * means the SFP is 8636 compliant.
		 */
		if (ret)
			return ret;
		if (sff_rev >= 0x3) {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int cxgb4_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *eprom, u8 *data)
{
	int ret = 0, offset = eprom->offset, len = eprom->len;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	memset(data, 0, eprom->len);
	if (offset + len <= I2C_PAGE_SIZE)
		return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
				 I2C_DEV_ADDR_A0, offset, len, data);

	/* offset + len spans 0xa0 and 0xa1 pages */
	if (offset <= I2C_PAGE_SIZE) {
		/* read 0xa0 page */
		len = I2C_PAGE_SIZE - offset;
		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
				I2C_DEV_ADDR_A0, offset, len, data);
		if (ret)
			return ret;
		offset = I2C_PAGE_SIZE;
		/* Remaining bytes to be read from second page =
		 * Total length - bytes read from first page
		 */
		len = eprom->len - len;
	}
	/* Read additional optical diagnostics from page 0xa2 if supported */
	return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
			 offset, len, &data[eprom->len - len]);
}

static u32 cxgb4_get_priv_flags(struct net_device *netdev)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;

	return (adapter->eth_flags | pi->eth_flags);
}

/**
 *	set_flags - set/unset specified flags if passed in new_flags
 *	@cur_flags: pointer to current flags
 *	@new_flags: new incoming flags
 *	@flags: set of flags to set/unset
 */
static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
{
	*cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
}

static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;

	set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
	set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);

	return 0;
}

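/* Example (illustrative): private flags such as "port_tx_vm_wr" are toggled
 * through the standard private-flags interface:
 *
 *	ethtool --show-priv-flags ethX
 *	ethtool --set-priv-flags ethX port_tx_vm_wr on
 *
 * "ethX" is a placeholder interface name.
 */
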
static void cxgb4_lb_test(struct net_device *netdev, u64 *lb_status)
{
	int dev_state = netif_running(netdev);

	if (dev_state) {
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}

	*lb_status = cxgb4_selftest_lb_pkt(netdev);

	if (dev_state) {
		netif_tx_start_all_queues(netdev);
		netif_carrier_on(netdev);
	}
}

static void cxgb4_self_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;

	memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST);

	if (!(adap->flags & CXGB4_FULL_INIT_DONE) ||
	    !(adap->flags & CXGB4_FW_OK)) {
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if (eth_test->flags & ETH_TEST_FL_OFFLINE)
		cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]);

	if (data[CXGB4_ETHTOOL_LB_TEST])
		eth_test->flags |= ETH_TEST_FL_FAILED;
}

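/* Example (illustrative): the loopback self-test above is declared as an
 * offline test, so it is run with:
 *
 *	ethtool -t ethX offline
 *
 * "ethX" is a placeholder; the test reports failure if the adapter is not
 * fully initialised or the firmware is not OK.
 */
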
static const struct ethtool_ops cxgb_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
				     ETHTOOL_COALESCE_TX_USECS_IRQ |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
	.get_fecparam      = get_fecparam,
	.set_fecparam      = set_fecparam,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_rxnfc         = get_rxnfc,
	.set_rxnfc         = set_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh          = get_rss_table,
	.set_rxfh          = set_rss_table,
	.self_test         = cxgb4_self_test,
	.flash_device      = set_flash,
	.get_ts_info       = get_ts_info,
	.set_dump          = set_dump,
	.get_dump_flag     = get_dump_flag,
	.get_dump_data     = get_dump_data,
	.get_module_info   = cxgb4_get_module_info,
	.get_module_eeprom = cxgb4_get_module_eeprom,
	.get_priv_flags    = cxgb4_get_priv_flags,
	.set_priv_flags    = cxgb4_set_priv_flags,
};

void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
{
	struct cxgb4_ethtool_filter_info *eth_filter_info;
	int i;

	if (!adap->ethtool_filters)
		return;

	eth_filter_info = adap->ethtool_filters->port;

	if (eth_filter_info) {
		for (i = 0; i < adap->params.nports; i++) {
			kvfree(eth_filter_info[i].loc_array);
			kfree(eth_filter_info[i].bmap);
		}
		kfree(eth_filter_info);
	}

	kfree(adap->ethtool_filters);
}

int cxgb4_init_ethtool_filters(struct adapter *adap)
{
	struct cxgb4_ethtool_filter_info *eth_filter_info;
	struct cxgb4_ethtool_filter *eth_filter;
	struct tid_info *tids = &adap->tids;
	u32 nentries, i;
	int ret;

	eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL);
	if (!eth_filter)
		return -ENOMEM;

	eth_filter_info = kcalloc(adap->params.nports,
				  sizeof(*eth_filter_info),
				  GFP_KERNEL);
	if (!eth_filter_info) {
		ret = -ENOMEM;
		goto free_eth_filter;
	}

	eth_filter->port = eth_filter_info;

	nentries = tids->nhpftids + tids->nftids;
	if (is_hashfilter(adap))
		nentries += tids->nhash +
			    (adap->tids.stid_base - adap->tids.tid_base);
	eth_filter->nentries = nentries;

	for (i = 0; i < adap->params.nports; i++) {
		eth_filter->port[i].loc_array = kvzalloc(nentries, GFP_KERNEL);
		if (!eth_filter->port[i].loc_array) {
			ret = -ENOMEM;
			goto free_eth_finfo;
		}

		eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (!eth_filter->port[i].bmap) {
			ret = -ENOMEM;
			goto free_eth_finfo;
		}
	}

	adap->ethtool_filters = eth_filter;
	return 0;

free_eth_finfo:
	while (i-- > 0) {
		kfree(eth_filter->port[i].bmap);
		kvfree(eth_filter->port[i].loc_array);
	}
	kfree(eth_filter_info);

free_eth_filter:
	kfree(eth_filter);

	return ret;
}

void cxgb4_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &cxgb_ethtool_ops;
}