/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/pci.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
struct oct_intrmod_resp {
	u64 rh;
	struct oct_intrmod_cfg intrmod;
	u64 status;
};

struct oct_mdio_cmd_resp {
	u64 rh;
	struct oct_mdio_cmd resp;
	u64 status;
};

#define OCT_MDIO45_RESP_SIZE	(sizeof(struct oct_mdio_cmd_resp))
/* Octeon's interface mode of operation */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};
#define OCT_ETHTOOL_REGDUMP_LEN		4096
#define OCT_ETHTOOL_REGDUMP_LEN_23XX	(4096 * 11)
#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF	(4096 * 2)
#define OCT_ETHTOOL_REGSVER		1
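
/* Dump sizes above are in bytes: the CN23XX PF register dump spans
 * eleven 4 KB blocks and the CN23XX VF two, hence the multipliers.
 */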
/* statistics of PF */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"mac_tx_total_bytes",
	"mac_tx_ctl_packets",
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collision",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferral_fail",

	"rx_lro_aborts_port",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",

	"mac_rx_ctl_packets",

	"link_state_changes",
};
/* statistics of VF */
static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
	"link_state_changes",
};
/* statistics of host tx queue */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"fw_instr_processed",
};
/* statistics of host rx queue */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"fw_dropped_nodispatch",

	"buffer_alloc_failure",
};
/* LiquidIO driver private flags */
static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
	"legacy firmware",
};

#define OCTNIC_NCMD_AUTONEG_ON	0x1
#define OCTNIC_NCMD_PHY_ON	0x2
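
/* These names surface through the standard private-flags interface;
 * typically (interface name is only an example):
 *
 *   $ ethtool --show-priv-flags eth0
 *   $ ethtool --set-priv-flags eth0 <flag> on
 */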
static int lio_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;

	linfo = &lio->linfo;

	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);

	switch (linfo->link.s.phy_type) {
	case LIO_PHY_PORT_TP:
		ecmd->base.port = PORT_TP;
		ecmd->base.autoneg = AUTONEG_DISABLE;
		ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
		ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
		ethtool_link_ksettings_add_link_mode(ecmd, supported,
						     10000baseT_Full);

		ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ecmd, advertising,
						     10000baseT_Full);

		break;

	case LIO_PHY_PORT_FIBRE:
		if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
		    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
		    linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
		    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
			dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
			ecmd->base.transceiver = XCVR_EXTERNAL;
		} else {
			dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
				linfo->link.s.if_mode);
		}

		ecmd->base.port = PORT_FIBRE;
		ecmd->base.autoneg = AUTONEG_DISABLE;
		ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);

		ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
		ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
		if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
		    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
			if (OCTEON_CN23XX_PF(oct)) {
				ethtool_link_ksettings_add_link_mode
					(ecmd, supported, 25000baseSR_Full);
				ethtool_link_ksettings_add_link_mode
					(ecmd, supported, 25000baseKR_Full);
				ethtool_link_ksettings_add_link_mode
					(ecmd, supported, 25000baseCR_Full);

				if (oct->no_speed_setting == 0) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseCR_Full);
				}

				if (oct->no_speed_setting == 0) {
					liquidio_get_speed(lio);
					liquidio_get_fec(lio);
				} else {
					oct->speed_setting = 25;
				}

				if (oct->speed_setting == 10) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseCR_Full);
				}
				if (oct->speed_setting == 25) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseCR_Full);
				}

				if (oct->no_speed_setting)
					break;

				ethtool_link_ksettings_add_link_mode
					(ecmd, supported, FEC_RS);
				ethtool_link_ksettings_add_link_mode
					(ecmd, supported, FEC_NONE);
				if (oct->props[lio->ifidx].fec == 1) {
					/* ETHTOOL_FEC_RS */
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising, FEC_RS);
				} else {
					/* ETHTOOL_FEC_OFF */
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising, FEC_NONE);
				}
			} else { /* VF */
				if (linfo->link.s.speed == 10000) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseCR_Full);

					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseCR_Full);
				}

				if (linfo->link.s.speed == 25000) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 25000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 25000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 25000baseCR_Full);

					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseCR_Full);
				}
			}
		} else {
			ethtool_link_ksettings_add_link_mode(ecmd, supported,
							     10000baseT_Full);
			ethtool_link_ksettings_add_link_mode(ecmd, advertising,
							     10000baseT_Full);
		}
		break;
	}

	if (linfo->link.s.link_up) {
		ecmd->base.speed = linfo->link.s.speed;
		ecmd->base.duplex = linfo->link.s.duplex;
	} else {
		ecmd->base.speed = SPEED_UNKNOWN;
		ecmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
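
/* What lio_get_link_ksettings() reports is what "ethtool <iface>"
 * prints; a 25G CN23XX PF, for example, typically shows port FIBRE,
 * autoneg off, and the 25000baseSR/KR/CR modes as supported.
 */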
static int lio_set_link_ksettings(struct net_device *netdev,
				  const struct ethtool_link_ksettings *ecmd)
{
	const int speed = ecmd->base.speed;
	struct lio *lio = GET_LIO(netdev);
	struct oct_link_info *linfo;
	struct octeon_device *oct;

	oct = lio->oct_dev;

	linfo = &lio->linfo;

	if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
	      oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
		return -EOPNOTSUPP;

	if (oct->no_speed_setting) {
		dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
			__func__);
		return -EOPNOTSUPP;
	}

	if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
	     ecmd->base.duplex != linfo->link.s.duplex) ||
	     ecmd->base.autoneg != AUTONEG_DISABLE ||
	    (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
	     ecmd->base.speed != SPEED_UNKNOWN))
		return -EOPNOTSUPP;

	if ((oct->speed_boot == speed / 1000) &&
	    oct->speed_boot == oct->speed_setting)
		return 0;

	liquidio_set_speed(lio, speed / 1000);

	dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
		oct->speed_setting);

	return 0;
}
static void
lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct lio *lio;
	struct octeon_device *oct;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio");
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}
static void
lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct lio *lio;
	struct octeon_device *oct;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio_vf");
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}
static int
lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
	nctrl.ncmd.s.param1 = num_queues;
	nctrl.ncmd.s.param2 = num_queues;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
			ret);
		return -1;
	}

	return 0;
}
static void
lio_ethtool_get_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
	u32 combined_count = 0, max_combined = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct)) {
		if (oct->sriov_info.sriov_enabled) {
			max_combined = lio->linfo.num_txpciq;
		} else {
			struct octeon_config *conf23_pf =
				CHIP_CONF(oct, cn23xx_pf);

			max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
		}
		combined_count = oct->num_iqs;
	} else if (OCTEON_CN23XX_VF(oct)) {
		u64 reg_val = 0ULL;
		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);

		reg_val = octeon_read_csr64(oct, ctrl);
		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
		combined_count = oct->num_iqs;
	}

	channel->max_rx = max_rx;
	channel->max_tx = max_tx;
	channel->max_combined = max_combined;
	channel->rx_count = rx_count;
	channel->tx_count = tx_count;
	channel->combined_count = combined_count;
}
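
/* These values back "ethtool -l <iface>"; note that only the
 * "combined" channel type is populated for CN23XX devices, while
 * CN6XXX reports separate rx/tx counts.
 */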
static int
lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
{
	struct msix_entry *msix_entries;
	int num_msix_irqs = 0;
	int i;

	if (!oct->msix_on)
		return 0;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon.
	 */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	if (oct->msix_on) {
		if (OCTEON_CN23XX_PF(oct))
			num_msix_irqs = oct->num_msix_irqs - 1;
		else if (OCTEON_CN23XX_VF(oct))
			num_msix_irqs = oct->num_msix_irqs;

		msix_entries = (struct msix_entry *)oct->msix_entries;
		for (i = 0; i < num_msix_irqs; i++) {
			if (oct->ioq_vector[i].vector) {
				/* clear the affinity_cpumask */
				irq_set_affinity_hint(msix_entries[i].vector,
						      NULL);
				free_irq(msix_entries[i].vector,
					 &oct->ioq_vector[i]);
				oct->ioq_vector[i].vector = 0;
			}
		}

		/* non-iov vector's argument is oct struct */
		if (OCTEON_CN23XX_PF(oct))
			free_irq(msix_entries[i].vector, oct);

		pci_disable_msix(oct->pci_dev);
		kfree(oct->msix_entries);
		oct->msix_entries = NULL;
	}

	kfree(oct->irq_name_storage);
	oct->irq_name_storage = NULL;

	if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
		dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
		return -1;
	}

	if (octeon_setup_interrupt(oct, num_ioqs)) {
		dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
		return -1;
	}

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return 0;
}
static int
lio_ethtool_set_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	u32 combined_count, max_combined;
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	int stopped = 0;

	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
		dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
		return -EINVAL;
	}

	if (!channel->combined_count || channel->other_count ||
	    channel->rx_count || channel->tx_count)
		return -EINVAL;

	combined_count = channel->combined_count;

	if (OCTEON_CN23XX_PF(oct)) {
		if (oct->sriov_info.sriov_enabled) {
			max_combined = lio->linfo.num_txpciq;
		} else {
			struct octeon_config *conf23_pf =
				CHIP_CONF(oct, cn23xx_pf);

			max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
		}
	} else if (OCTEON_CN23XX_VF(oct)) {
		u64 reg_val = 0ULL;
		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);

		reg_val = octeon_read_csr64(oct, ctrl);
		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
	} else {
		return -EINVAL;
	}

	if (combined_count > max_combined || combined_count < 1)
		return -EINVAL;

	if (combined_count == oct->num_iqs)
		return 0;

	ifstate_set(lio, LIO_IFSTATE_RESETTING);

	if (netif_running(dev)) {
		dev->netdev_ops->ndo_stop(dev);
		stopped = 1;
	}

	if (lio_reset_queues(dev, combined_count))
		return -EINVAL;

	if (stopped)
		dev->netdev_ops->ndo_open(dev);

	ifstate_reset(lio, LIO_IFSTATE_RESETTING);

	return 0;
}
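
/* Typical usage, assuming an interface named eth0:
 *
 *   $ ethtool -L eth0 combined 4
 *
 * Separate rx/tx/other counts are rejected above, and firmware 1.6.1
 * or newer is required.
 */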
static int lio_get_eeprom_len(struct net_device *netdev)
{
	u8 buf[192];
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;
	int len;

	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
		      board_info->name, board_info->serial_number,
		      board_info->major, board_info->minor);

	return len;
}
static int
lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;

	if (eeprom->offset)
		return -EINVAL;

	eeprom->magic = oct_dev->pci_dev->vendor;
	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	sprintf((char *)bytes,
		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
		board_info->name, board_info->serial_number,
		board_info->major, board_info->minor);

	return 0;
}
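
/* The "EEPROM" is really the board-info string, so "ethtool -e" on a
 * LiquidIO interface returns text of the form
 * "boardname:<name> serialnum:<sn> maj:<n> min:<n>".
 */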
static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
	nctrl.ncmd.s.param1 = addr;
	nctrl.ncmd.s.param2 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Failed to configure gpio value, ret=%d\n", ret);
		return -EINVAL;
	}

	return 0;
}
static int octnet_id_active(struct net_device *netdev, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
	nctrl.ncmd.s.param1 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Failed to configure gpio value, ret=%d\n", ret);
		return -EINVAL;
	}

	return 0;
}
/* This routine provides PHY access routines for
 * mdio clause45.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp), 0);

	if (!sc)
		return -ENOMEM;

	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		octeon_free_soft_command(oct_dev, sc);
		return -EBUSY;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived
	 */
	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
	if (retval)
		return retval;

	retval = mdio_cmd_rsp->status;
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet mdio45 access failed: %x\n", retval);
		WRITE_ONCE(sc->caller_is_done, true);
		return -EBUSY;
	}

	octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
			    sizeof(struct oct_mdio_cmd) / 8);

	if (!op)
		*value = mdio_cmd_rsp->resp.value1;

	WRITE_ONCE(sc->caller_is_done, true);

	return retval;
}
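
/* Callers use op == 0 for a register read (the result comes back in
 * *value) and op == 1 for a write, as the LED save/restore sequences
 * in lio_set_phys_id() below rely on.
 */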
static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;
	int value, ret;
	u32 cur_ver;

	linfo = &lio->linfo;
	cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
			     oct->fw_info.ver.min,
			     oct->fw_info.ver.rev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &value);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
			if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
			    cur_ver > OCT_FW_VER(1, 7, 2))
				return 2;
			else
				return 0;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
		    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
		    cur_ver > OCT_FW_VER(1, 7, 2))
			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
		else if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
		    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
		    cur_ver > OCT_FW_VER(1, 7, 2))
			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
		else if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
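
/* Entered via "ethtool -p <iface> [seconds]"; returning 2 from
 * ETHTOOL_ID_ACTIVE asks the ethtool core to drive ETHTOOL_ID_ON /
 * ETHTOOL_ID_OFF at two cycles per second instead of letting the
 * hardware blink on its own.
 */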
static void
lio_ethtool_get_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ering)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
	    rx_pending = 0;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
		rx_pending = oct->droq[0]->max_count;
		tx_pending = oct->instr_queue[0]->max_count;
	}

	ering->tx_pending = tx_pending;
	ering->tx_max_pending = tx_max_pending;
	ering->rx_pending = rx_pending;
	ering->rx_max_pending = rx_max_pending;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
}
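
/* Backs "ethtool -g <iface>": CN23XX descriptor counts are read from
 * live queue zero, CN6XXX counts from the static chip configuration.
 */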
static int lio_23xx_reconfigure_queue_count(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 resp_size, data_size;
	struct liquidio_if_cfg_resp *resp;
	struct octeon_soft_command *sc;
	union oct_nic_if_cfg if_cfg;
	struct lio_version *vdata;
	u32 ifidx_or_pfnum;
	int retval;
	int j;

	resp_size = sizeof(struct liquidio_if_cfg_resp);
	data_size = sizeof(struct lio_version);
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, data_size,
					  resp_size, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
			__func__);
		return -1;
	}

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	vdata = (struct lio_version *)sc->virtdptr;

	vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
	vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
	vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

	ifidx_or_pfnum = oct->pf_num;

	if_cfg.u64 = 0;
	if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
	if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
	if_cfg.s.base_queue = oct->sriov_info.pf_srn;
	if_cfg.s.gmx_port_id = oct->pf_num;

	sc->iq_no = 0;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_QCOUNT_UPDATE, 0,
				    if_cfg.u64, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Sending iq/oq config failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return retval;

	retval = resp->status;
	if (retval) {
		dev_err(&oct->pci_dev->dev,
			"iq/oq config failed: %x\n", retval);
		WRITE_ONCE(sc->caller_is_done, true);
		return -1;
	}

	octeon_swap_8B_data((u64 *)(&resp->cfg_info),
			    (sizeof(struct liquidio_if_cfg_info)) >> 3);

	lio->ifidx = ifidx_or_pfnum;
	lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
	lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
	for (j = 0; j < lio->linfo.num_rxpciq; j++) {
		lio->linfo.rxpciq[j].u64 =
			resp->cfg_info.linfo.rxpciq[j].u64;
	}

	for (j = 0; j < lio->linfo.num_txpciq; j++) {
		lio->linfo.txpciq[j].u64 =
			resp->cfg_info.linfo.txpciq[j].u64;
	}

	lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
	lio->txq = lio->linfo.txpciq[0].s.q_no;
	lio->rxq = lio->linfo.rxpciq[0].s.q_no;

	dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
		 lio->linfo.num_rxpciq);

	WRITE_ONCE(sc->caller_is_done, true);

	return 0;
}
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int i, queue_count_update = 0;
	struct napi_struct *napi, *n;
	int ret;

	schedule_timeout_uninterruptible(msecs_to_jiffies(100));

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	if (lio_wait_for_instr_fetch(oct))
		dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

	if (octeon_set_io_queues_off(oct)) {
		dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
		return -1;
	}

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon.
	 */
	oct->fn_list.disable_io_queues(oct);
	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (num_qs != oct->num_iqs) {
		ret = netif_set_real_num_rx_queues(netdev, num_qs);
		if (ret) {
			dev_err(&oct->pci_dev->dev,
				"Setting real number rx failed\n");
			return ret;
		}

		ret = netif_set_real_num_tx_queues(netdev, num_qs);
		if (ret) {
			dev_err(&oct->pci_dev->dev,
				"Setting real number tx failed\n");
			return ret;
		}

		/* The value of queue_count_update decides whether it is the
		 * queue count or the descriptor count that is being
		 * re-configured
		 */
		queue_count_update = 1;
	}

	/* Re-configuration of queues can happen in two scenarios, SRIOV enabled
	 * and SRIOV disabled. Few things like recreating queue zero, resetting
	 * glists and IRQs are required for both. For the latter, some more
	 * steps like updating sriov_info for the octeon device need to be done.
	 */
	if (queue_count_update) {
		cleanup_rx_oom_poll_fn(netdev);

		lio_delete_glists(lio);

		/* Delete mbox for PF which is SRIOV disabled because sriov_info
		 * will be now changed.
		 */
		if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
			oct->fn_list.free_mbox(oct);
	}

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & BIT_ULL(i)))
			continue;
		octeon_delete_droq(oct, i);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		octeon_delete_instr_queue(oct, i);
	}

	if (queue_count_update) {
		/* For PF re-configure sriov related information */
		if ((OCTEON_CN23XX_PF(oct)) &&
		    !oct->sriov_info.sriov_enabled) {
			oct->sriov_info.num_pf_rings = num_qs;
			if (cn23xx_sriov_config(oct)) {
				dev_err(&oct->pci_dev->dev,
					"Queue reset aborted: SRIOV config failed\n");
				return -1;
			}

			num_qs = oct->sriov_info.num_pf_rings;
		}
	}

	if (oct->fn_list.setup_device_regs(oct)) {
		dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
		return -1;
	}

	/* The following are needed in case of queue count re-configuration and
	 * not for descriptor count re-configuration.
	 */
	if (queue_count_update) {
		if (octeon_setup_instr_queues(oct))
			return -1;

		if (octeon_setup_output_queues(oct))
			return -1;

		/* Recreating mbox for PF that is SRIOV disabled */
		if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
			if (oct->fn_list.setup_mbox(oct)) {
				dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
				return -1;
			}
		}

		/* Deleting and recreating IRQs whether the interface is SRIOV
		 * enabled or disabled.
		 */
		if (lio_irq_reallocate_irqs(oct, num_qs)) {
			dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
			return -1;
		}

		/* Enable the input and output queues for this Octeon device */
		if (oct->fn_list.enable_io_queues(oct)) {
			dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
			return -1;
		}

		for (i = 0; i < oct->num_oqs; i++)
			writel(oct->droq[i]->max_count,
			       oct->droq[i]->pkts_credit_reg);
	}

	/* Informing firmware about the new queue count. It is required
	 * for firmware to allocate more number of queues than those at
	 * load time.
	 */
	if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
		if (lio_23xx_reconfigure_queue_count(lio))
			return -1;
	}

	/* Once firmware is aware of the new value, queues can be recreated */
	if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
		dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
		return -1;
	}

	if (queue_count_update) {
		if (lio_setup_glists(oct, lio, num_qs)) {
			dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
			return -1;
		}

		if (setup_rx_oom_poll_fn(netdev)) {
			dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n");
			return 1;
		}
	}

	/* Send firmware the information about new number of queues
	 * if the interface is a VF or a PF that is SRIOV enabled.
	 */
	if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
		if (lio_send_queue_count_update(netdev, num_qs))
			return -1;

	return 0;
}
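
/* lio_reset_queues() serves both the channel-count (ethtool -L) and
 * ring-size (ethtool -G) paths; queue_count_update distinguishes them,
 * since only a queue-count change rebuilds glists, the mailbox and
 * IRQs.
 */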
static int lio_ethtool_set_ringparam(struct net_device *netdev,
				     struct ethtool_ringparam *ering)
{
	u32 rx_count, tx_count, rx_count_old, tx_count_old;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int stopped = 0;

	if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
		return -EINVAL;

	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
		return -EINVAL;

	rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
			   CN23XX_MAX_OQ_DESCRIPTORS);
	tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
			   CN23XX_MAX_IQ_DESCRIPTORS);

	rx_count_old = oct->droq[0]->max_count;
	tx_count_old = oct->instr_queue[0]->max_count;

	if (rx_count == rx_count_old && tx_count == tx_count_old)
		return 0;

	ifstate_set(lio, LIO_IFSTATE_RESETTING);

	if (netif_running(netdev)) {
		netdev->netdev_ops->ndo_stop(netdev);
		stopped = 1;
	}

	/* Change RX/TX DESCS count */
	if (tx_count != tx_count_old)
		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    tx_count);
	if (rx_count != rx_count_old)
		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    rx_count);

	if (lio_reset_queues(netdev, oct->num_iqs))
		goto err_lio_reset_queues;

	if (stopped)
		netdev->netdev_ops->ndo_open(netdev);

	ifstate_reset(lio, LIO_IFSTATE_RESETTING);

	return 0;

err_lio_reset_queues:
	if (tx_count != tx_count_old)
		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    tx_count_old);
	if (rx_count != rx_count_old)
		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    rx_count_old);
	return -EINVAL;
}
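
/* Typical usage, assuming an interface named eth0:
 *
 *   $ ethtool -G eth0 rx 1024 tx 1024
 *
 * Requested sizes are clamped to the CN23XX descriptor limits, and the
 * old counts are restored if the queue reset fails.
 */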
static u32 lio_get_msglevel(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->msg_enable;
}

static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
		if (msglvl & NETIF_MSG_HW)
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);
		else
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_DISABLE, 0);
	}

	lio->msg_enable = msglvl;
}

static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	lio->msg_enable = msglvl;
}
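
/* Toggling the NETIF_MSG_HW bit also switches firmware verbosity, so
 * "ethtool -s eth0 msglvl hw on" (interface name is only an example)
 * sends OCTNET_CMD_VERBOSE_ENABLE to the NIC.
 */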
static void
lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers. Just report pause frame support.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	pause->autoneg = 0;

	pause->tx_pause = oct->tx_pause;
	pause->rx_pause = oct->rx_pause;
}

static int
lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct oct_link_info *linfo = &lio->linfo;
	int ret = 0;

	if (oct->chip_id != OCTEON_CN23XX_PF_VID)
		return -EINVAL;

	if (linfo->link.s.duplex == 0) {
		/* no flow control for half duplex */
		if (pause->rx_pause || pause->tx_pause)
			return -EINVAL;
	}

	/* do not support autoneg of link flow control */
	if (pause->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	if (pause->rx_pause) {
		/* enable rx pause */
		nctrl.ncmd.s.param1 = 1;
	} else {
		/* disable rx pause */
		nctrl.ncmd.s.param1 = 0;
	}

	if (pause->tx_pause) {
		/* enable tx pause */
		nctrl.ncmd.s.param2 = 1;
	} else {
		/* disable tx pause */
		nctrl.ncmd.s.param2 = 0;
	}

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Failed to set pause parameter, ret=%d\n", ret);
		return -EINVAL;
	}

	oct->rx_pause = pause->rx_pause;
	oct->tx_pause = pause->tx_pause;

	return 0;
}
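
/* Flow control is only configurable on the CN23XX PF; for example
 * (interface name is only an example):
 *
 *   $ ethtool -A eth0 rx on tx on
 *
 * Pause autonegotiation is rejected, matching lio_get_pauseparam().
 */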
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats  __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct rtnl_link_stats64 lstats;
	int i = 0, j;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = lstats.rx_packets;
	/*sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = lstats.tx_packets;
	/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = lstats.rx_bytes;
	/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = lstats.tx_bytes;
	data[i++] = lstats.rx_errors +
		    oct_dev->link_stats.fromwire.fcs_err +
		    oct_dev->link_stats.fromwire.jabber_err +
		    oct_dev->link_stats.fromwire.l2_err +
		    oct_dev->link_stats.fromwire.frame_err;
	data[i++] = lstats.tx_errors;
	/*sum of oct->droq[oq_no]->stats->rx_dropped +
	 *oct->droq[oq_no]->stats->dropped_nodispatch +
	 *oct->droq[oq_no]->stats->dropped_toomany +
	 *oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = lstats.rx_dropped +
		    oct_dev->link_stats.fromwire.fifo_err +
		    oct_dev->link_stats.fromwire.dmac_drop +
		    oct_dev->link_stats.fromwire.red_drops +
		    oct_dev->link_stats.fromwire.fw_err_pko +
		    oct_dev->link_stats.fromwire.fw_err_link +
		    oct_dev->link_stats.fromwire.fw_err_drop;
	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = lstats.tx_dropped +
		    oct_dev->link_stats.fromhost.max_collision_fail +
		    oct_dev->link_stats.fromhost.max_deferral_fail +
		    oct_dev->link_stats.fromhost.total_collisions +
		    oct_dev->link_stats.fromhost.fw_err_pko +
		    oct_dev->link_stats.fromhost.fw_err_link +
		    oct_dev->link_stats.fromhost.fw_err_drop +
		    oct_dev->link_stats.fromhost.fw_err_pki;

	/* firmware tx stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 *fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* Multicast packets sent by this port */
	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;

	/* mac tx statistics */
	/*CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/*CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/*CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/*CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/*CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/*CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/* Multicast packets received on this port */
	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_rx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_rx_vxlan_err
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* LRO */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_timer
	 */
	/* intrmod: packet forward rate */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/*CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/*CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/*CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/*lio->link_changes*/
	data[i++] = CVM_CAST64(lio->link_changes);

	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
			continue;
		/*packets to network port*/
		/*# of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/*# of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/*# of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/*# of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/*XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/*instruction to firmware: data and control */
		/*# of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/*# of instructions processed */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_processed);
		/*# of instructions could not be processed */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_dropped);
		/*bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		/*tso request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/*vxlan request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/*txq restart*/
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
			continue;

		/*packets send to TCP/IP network stack */
		/*# of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/*# of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/*# of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/*control and data path*/
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
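
/* Everything gathered above is shown by "ethtool -S <iface>", in the
 * order of oct_stats_strings followed by the per-queue "tx-N-..." and
 * "rx-N-..." counters generated in lio_get_strings().
 */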
static void lio_vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats
				     __attribute__((unused)),
				     u64 *data)
{
	struct rtnl_link_stats64 lstats;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j, vj;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = lstats.rx_packets;
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = lstats.tx_packets;
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = lstats.rx_bytes;
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = lstats.tx_bytes;
	data[i++] = lstats.rx_errors;
	data[i++] = lstats.tx_errors;
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * oct->droq[oq_no]->stats->dropped_nodispatch +
	 * oct->droq[oq_no]->stats->dropped_toomany +
	 * oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = lstats.rx_dropped;
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = lstats.tx_dropped +
		oct_dev->link_stats.fromhost.fw_err_drop;

	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;

	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	for (vj = 0; vj < oct_dev->num_iqs; vj++) {
		j = lio->linfo.txpciq[vj].s.q_no;

		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] =
		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions could not be processed */
		data[i++] =
		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.bytes_sent);
		/* tso request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restart */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	for (vj = 0; vj < oct_dev->num_oqs; vj++) {
		j = lio->linfo.rxpciq[vj].s.q_no;

		/* packets send to TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_bytes_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	int i;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
			sprintf(data, "%s", oct_priv_flags_strings[i]);
			data += ETH_GSTRING_LEN;
		}
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		break;
	}
}
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}
static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
			       u8 *data)
{
	int num_iq_stats, num_oq_stats, i, j;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_vf_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_vf_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}
static int lio_get_priv_flags_ss_count(struct lio *lio)
{
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		return ARRAY_SIZE(oct_priv_flags_strings);
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		return -EOPNOTSUPP;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EOPNOTSUPP;
	}
}
static int lio_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}

static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_vf_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}
/* get interrupt moderation parameters */
static int octnet_get_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_resp *resp;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_intrmod_resp), 0);

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_intrmod_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_intrmod_resp));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
	if (retval)
		return -ENODEV;

	if (resp->status) {
		dev_err(&oct_dev->pci_dev->dev,
			"Get interrupt moderation parameters failed\n");
		WRITE_ONCE(sc->caller_is_done, true);
		return -ENODEV;
	}

	octeon_swap_8B_data((u64 *)&resp->intrmod,
			    (sizeof(struct oct_intrmod_cfg)) / 8);
	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
	WRITE_ONCE(sc->caller_is_done, true);

	return 0;
}
/* Configure interrupt moderation parameters */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  16, 0);

	if (!sc)
		return -ENOMEM;

	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
	if (retval)
		return retval;

	retval = sc->sc_status;
	if (retval == 0) {
		dev_info(&oct_dev->pci_dev->dev,
			 "Rx-Adaptive Interrupt moderation %s\n",
			 (intr_cfg->rx_enable) ?
			 "enabled" : "disabled");
		WRITE_ONCE(sc->caller_is_done, true);
		return 0;
	}

	dev_err(&oct_dev->pci_dev->dev,
		"intrmod config failed. Status: %x\n", retval);
	WRITE_ONCE(sc->caller_is_done, true);
	return -ENODEV;
}

static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg intrmod_cfg;

	if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
		return -ENODEV;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID: {
		if (!intrmod_cfg.rx_enable) {
			intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
			intr_coal->rx_max_coalesced_frames =
				oct->rx_max_coalesced_frames;
		}
		if (!intrmod_cfg.tx_enable)
			intr_coal->tx_max_coalesced_frames =
				oct->tx_max_coalesced_frames;
		break;
	}
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intrmod_cfg.rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}
		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	if (intrmod_cfg.rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg.rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg.check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg.maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg.minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg.rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg.rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg.rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg.rx_mincnt_trigger;
	}
	if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
	    (intrmod_cfg.tx_enable)) {
		intr_coal->use_adaptive_tx_coalesce =
			intrmod_cfg.tx_enable;
		intr_coal->tx_max_coalesced_frames_high =
			intrmod_cfg.tx_maxcnt_trigger;
		intr_coal->tx_max_coalesced_frames_low =
			intrmod_cfg.tx_mincnt_trigger;
	}
	return 0;
}
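
/* The fields filled in above are what "ethtool -c" prints, e.g.
 * (illustrative output only; real values depend on firmware defaults):
 *
 *   $ ethtool -c eth0
 *   Adaptive RX: on  TX: off
 *   rx-usecs: 64
 *   rx-frames: 64
 */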

/* Enable/Disable auto interrupt moderation */
static int oct_cfg_adaptive_intr(struct lio *lio,
				 struct oct_intrmod_cfg *intrmod_cfg,
				 struct ethtool_coalesce *intr_coal)
{
	int ret = 0;

	if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
		intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
		intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
		intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
	}
	if (intrmod_cfg->rx_enable) {
		intrmod_cfg->rx_maxcnt_trigger =
			intr_coal->rx_max_coalesced_frames_high;
		intrmod_cfg->rx_maxtmr_trigger =
			intr_coal->rx_coalesce_usecs_high;
		intrmod_cfg->rx_mintmr_trigger =
			intr_coal->rx_coalesce_usecs_low;
		intrmod_cfg->rx_mincnt_trigger =
			intr_coal->rx_max_coalesced_frames_low;
	}
	if (intrmod_cfg->tx_enable) {
		intrmod_cfg->tx_maxcnt_trigger =
			intr_coal->tx_max_coalesced_frames_high;
		intrmod_cfg->tx_mincnt_trigger =
			intr_coal->tx_max_coalesced_frames_low;
	}

	ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);

	return ret;
}
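
/* Adaptive moderation itself appears to run in the NIC firmware: this
 * function only forwards the sampling interval and the low/high trigger
 * pairs; the firmware then moves the effective interrupt levels as the
 * measured packet rate crosses pkt_rate_low/pkt_rate_high.
 */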
static int
oct_cfg_rx_intrcnt(struct lio *lio,
		   struct oct_intrmod_cfg *intrmod,
		   struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 rx_max_coalesced_frames;

	/* Config Cnt based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intr_coal->rx_max_coalesced_frames)
			rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
		else
			rx_max_coalesced_frames =
				intr_coal->rx_max_coalesced_frames;
		octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
				 rx_max_coalesced_frames);
		CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
		break;
	}
	case OCTEON_CN23XX_PF_VID: {
		int q_no;

		if (!intr_coal->rx_max_coalesced_frames)
			rx_max_coalesced_frames = intrmod->rx_frames;
		else
			rx_max_coalesced_frames =
				intr_coal->rx_max_coalesced_frames;
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			q_no += oct->sriov_info.pf_srn;
			octeon_write_csr64(
				oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
				(octeon_read_csr64(
				     oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
				 (0x3fffff00000000UL)) |
				(rx_max_coalesced_frames - 1));
			/* consider setting resend bit */
		}
		intrmod->rx_frames = rx_max_coalesced_frames;
		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
		break;
	}
	case OCTEON_CN23XX_VF_VID: {
		int q_no;

		if (!intr_coal->rx_max_coalesced_frames)
			rx_max_coalesced_frames = intrmod->rx_frames;
		else
			rx_max_coalesced_frames =
				intr_coal->rx_max_coalesced_frames;
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			octeon_write_csr64(
				oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
				(octeon_read_csr64(
				     oct,
				     CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
				 (0x3fffff00000000UL)) |
				(rx_max_coalesced_frames - 1));
			/* consider writing to resend bit here */
		}
		intrmod->rx_frames = rx_max_coalesced_frames;
		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
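
/* CN23XX_SLI_OQ_PKT_INT_LEVELS packs both thresholds into one CSR: the
 * packet-count level sits in the low bits and the 22-bit time level in
 * bits 53:32.  The read-modify-write above keeps the time field (mask
 * 0x3fffff << 32 == 0x3fffff00000000) and replaces only the count.
 */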
static int oct_cfg_rx_intrtime(struct lio *lio,
			       struct oct_intrmod_cfg *intrmod,
			       struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 time_threshold, rx_coalesce_usecs;

	/* Config Time based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;
		if (!intr_coal->rx_coalesce_usecs)
			rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
		else
			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;

		time_threshold = lio_cn6xxx_get_oq_ticks(oct,
							 rx_coalesce_usecs);
		octeon_write_csr(oct,
				 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
				 time_threshold);

		CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
		break;
	}
	case OCTEON_CN23XX_PF_VID: {
		u64 time_threshold;
		int q_no;

		if (!intr_coal->rx_coalesce_usecs)
			rx_coalesce_usecs = intrmod->rx_usecs;
		else
			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
		time_threshold =
			cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			q_no += oct->sriov_info.pf_srn;
			octeon_write_csr64(oct,
					   CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
					   (intrmod->rx_frames |
					    ((u64)time_threshold << 32)));
			/* consider writing to resend bit here */
		}
		intrmod->rx_usecs = rx_coalesce_usecs;
		oct->rx_coalesce_usecs = rx_coalesce_usecs;
		break;
	}
	case OCTEON_CN23XX_VF_VID: {
		u64 time_threshold;
		int q_no;

		if (!intr_coal->rx_coalesce_usecs)
			rx_coalesce_usecs = intrmod->rx_usecs;
		else
			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;

		time_threshold =
			cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			octeon_write_csr64(
				oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
				(intrmod->rx_frames |
				 ((u64)time_threshold << 32)));
			/* consider setting resend bit */
		}
		intrmod->rx_usecs = rx_coalesce_usecs;
		oct->rx_coalesce_usecs = rx_coalesce_usecs;
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}
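
/* The time level is programmed in ticks, not microseconds:
 * cn23xx_pf/vf_get_oq_ticks() scale rx_coalesce_usecs by the device
 * clock before the value is shifted into bits 53:32, alongside the
 * rx_frames count kept in the low bits.
 */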
static int
oct_cfg_tx_intrcnt(struct lio *lio,
		   struct oct_intrmod_cfg *intrmod,
		   struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 iq_intr_pkt;
	void __iomem *inst_cnt_reg;
	u64 val;

	/* Config Cnt based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		break;
	case OCTEON_CN23XX_VF_VID:
	case OCTEON_CN23XX_PF_VID: {
		int q_no;

		if (!intr_coal->tx_max_coalesced_frames)
			iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
				      CN23XX_PKT_IN_DONE_WMARK_MASK;
		else
			iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
				      CN23XX_PKT_IN_DONE_WMARK_MASK;
		for (q_no = 0; q_no < oct->num_iqs; q_no++) {
			inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
			val = readq(inst_cnt_reg);
			/* clear wmark and count; don't write the count back */
			val = (val & 0xFFFF000000000000ULL) |
			      ((u64)(iq_intr_pkt - 1)
			       << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
			writeq(val, inst_cnt_reg);
			/* consider setting resend bit */
		}
		intrmod->tx_frames = iq_intr_pkt;
		oct->tx_max_coalesced_frames = iq_intr_pkt;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
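
/* For input queues the watermark lives in the instruction-count CSR
 * itself: the top 16 bits are preserved, the old count/watermark bits
 * are cleared, and (iq_intr_pkt - 1) is written at the watermark bit
 * position, so an interrupt fires once that many commands complete.
 */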
static int lio_set_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	int ret;
	struct octeon_device *oct = lio->oct_dev;
	struct oct_intrmod_cfg intrmod = {0};
	u32 j, q_no;
	int db_max, db_min;

	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		db_min = CN6XXX_DB_MIN;
		db_max = CN6XXX_DB_MAX;
		if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
		    (intr_coal->tx_max_coalesced_frames <= db_max)) {
			for (j = 0; j < lio->linfo.num_txpciq; j++) {
				q_no = lio->linfo.txpciq[j].s.q_no;
				oct->instr_queue[q_no]->fill_threshold =
					intr_coal->tx_max_coalesced_frames;
			}
		} else {
			dev_err(&oct->pci_dev->dev,
				"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
				intr_coal->tx_max_coalesced_frames,
				db_min, db_max);
			return -EINVAL;
		}
		break;
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		break;
	default:
		return -EINVAL;
	}

	intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
	intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
	intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
	intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));

	ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);

	if (!intr_coal->use_adaptive_rx_coalesce) {
		ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
		if (ret)
			goto ret_intrmod;

		ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
		if (ret)
			goto ret_intrmod;
	} else {
		oct->rx_coalesce_usecs =
			CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
		oct->rx_max_coalesced_frames =
			CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	}

	if (!intr_coal->use_adaptive_tx_coalesce) {
		ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
		if (ret)
			goto ret_intrmod;
	} else {
		oct->tx_max_coalesced_frames =
			CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
	}

	return 0;
ret_intrmod:
	return ret;
}
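
/* Typical invocations that end up here (illustrative):
 *
 *   # fixed moderation: interrupt after 64 pkts or 100 usecs
 *   $ ethtool -C eth0 adaptive-rx off rx-frames 64 rx-usecs 100
 *
 *   # let the firmware adapt between the low/high trigger pairs
 *   $ ethtool -C eth0 adaptive-rx on
 */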
static int lio_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	struct lio *lio = GET_LIO(netdev);

	info->so_timestamping =
#ifdef PTP_HARDWARE_TIMESTAMPING
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
#endif
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;

	if (lio->ptp_clock)
		info->phc_index = ptp_clock_index(lio->ptp_clock);
	else
		info->phc_index = -1;

#ifdef PTP_HARDWARE_TIMESTAMPING
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
#endif

	return 0;
}
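
/* Only capabilities are reported here; hardware timestamping is actually
 * switched on or off through the SIOCSHWTSTAMP ioctl path, which is
 * handled elsewhere in the driver.
 */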

/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
		return OCT_ETHTOOL_REGDUMP_LEN_23XX;
	case OCTEON_CN23XX_VF_VID:
		return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
	default:
		return OCT_ETHTOOL_REGDUMP_LEN;
	}
}
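
/* The ethtool core allocates a buffer of exactly this size before
 * calling lio_get_regs(), so each length must cover the worst-case
 * sprintf() output of the matching read_csr_reg() helper below.
 */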
static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
{
	u32 reg;
	u8 pf_num = oct->pf_num;
	int len = 0;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

	/* 0x29030 or 0x29040 */
	reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
	len += sprintf(s + len,
		       "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x27080 or 0x27090 */
	reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x27000 or 0x27010 */
	reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29120 */
	reg = 0x29120;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x27300 */
	reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
	len += sprintf(
		s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
		oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));

	/* 0x27200 */
	reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29130 */
	reg = CN23XX_SLI_PKT_CNT_INT;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29140 */
	reg = CN23XX_SLI_PKT_TIME_INT;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29160 */
	reg = 0x29160;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29180 */
	reg = CN23XX_SLI_OQ_WMARK;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
		       reg, (u64)octeon_read_csr64(oct, reg));

	/* 0x291E0 */
	reg = CN23XX_SLI_PKT_IOQ_RING_RST;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29210 */
	reg = CN23XX_SLI_GBL_CONTROL;
	len += sprintf(s + len,
		       "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29220 */
	reg = 0x29220;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
		       reg, (u64)octeon_read_csr64(oct, reg));

	/* PF only */
	if (pf_num == 0) {
		/* 0x29260 */
		reg = CN23XX_SLI_OUT_BP_EN_W1S;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
			       reg, (u64)octeon_read_csr64(oct, reg));
	} else if (pf_num == 1) {
		/* 0x29270 */
		reg = CN23XX_SLI_OUT_BP_EN2_W1S;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
			       reg, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10040 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10080 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10090 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_SIZE(i);
		len += sprintf(
			s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
			reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10050 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10070 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x100a0 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x100b0 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x100c0 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10000 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10010 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
			       i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10020 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10030 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10040 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	return len;
}
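
/* Every line of the dump uses the same "[address] (NAME): value" layout,
 * so the text returned by "ethtool -d" can be grepped for a CSR either
 * by name or by offset.
 */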
static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
{
	int len = 0;
	u32 reg;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	return len;
}
static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
{
	int len = 0;
	u32 reg;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
	reg = CN6XXX_WIN_WR_ADDR_LO;
	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_LO;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_LO;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
		       CN6XXX_WIN_WR_MASK_REG,
		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));

	/* PCI Interrupt Register */
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT0,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT0));
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT1,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));

	/* PCI Output queue registers */
	for (i = 0; i < oct->num_oqs; i++) {
		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}
	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
		       reg, octeon_read_csr(oct, reg));
	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
		       reg, octeon_read_csr(oct, reg));

	/* PCI Input queue registers */
	for (i = 0; i <= 3; i++) {
		reg = CN6XXX_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}

	/* PCI DMA registers */

	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
		       CN6XXX_DMA_CNT(0),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(0),
		       octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(0),
		       octeon_read_csr(oct, reg));

	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
		       CN6XXX_DMA_CNT(1),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));

	/* PCI Index registers */

	len += sprintf(s + len, "\n");

	for (i = 0; i < 16; i++) {
		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
	}

	return len;
}
static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
{
	u32 val;
	int i, len = 0;

	/* PCI CONFIG Registers */

	len += sprintf(s + len,
		       "\n\t Octeon Config space Registers\n\n");

	for (i = 0; i <= 13; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	for (i = 30; i <= 34; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	return len;
}
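
/* Dwords 0-13 cover the standard PCI configuration header; dwords 30-34
 * are presumably a capability/vendor-specific region of interest (the
 * window in between is skipped).
 */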

/* Return register dump to user app. */
static void lio_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *regbuf)
{
	struct lio *lio = GET_LIO(dev);
	int len = 0;
	struct octeon_device *oct = lio->oct_dev;

	regs->version = OCT_ETHTOOL_REGSVER;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
		len += cn23xx_read_csr_reg(regbuf + len, oct);
		break;
	case OCTEON_CN23XX_VF_VID:
		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
		len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
		len += cn6xxx_read_csr_reg(regbuf + len, oct);
		len += cn6xxx_read_config_reg(regbuf + len, oct);
		break;
	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
			__func__, oct->chip_id);
	}
}
static u32 lio_get_priv_flags(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->oct_dev->priv_flags;
}
static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct lio *lio = GET_LIO(netdev);
	bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));

	lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
			  intr_by_tx_bytes);
	return 0;
}
static int lio_get_fecparam(struct net_device *netdev,
			    struct ethtool_fecparam *fec)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	fec->active_fec = ETHTOOL_FEC_NONE;
	fec->fec = ETHTOOL_FEC_NONE;

	if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
	    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
		if (oct->no_speed_setting == 1)
			return 0;

		liquidio_get_fec(lio);
		fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF);
		if (oct->props[lio->ifidx].fec == 1)
			fec->active_fec = ETHTOOL_FEC_RS;
		else
			fec->active_fec = ETHTOOL_FEC_OFF;
	}

	return 0;
}
static int lio_set_fecparam(struct net_device *netdev,
			    struct ethtool_fecparam *fec)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
	    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
		if (oct->no_speed_setting == 1)
			return -EOPNOTSUPP;

		if (fec->fec & ETHTOOL_FEC_OFF)
			liquidio_set_fec(lio, 0);
		else if (fec->fec & ETHTOOL_FEC_RS)
			liquidio_set_fec(lio, 1);
		else
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
#define LIO_ETHTOOL_COALESCE	(ETHTOOL_COALESCE_RX_USECS |		\
				 ETHTOOL_COALESCE_MAX_FRAMES |		\
				 ETHTOOL_COALESCE_USE_ADAPTIVE |	\
				 ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW |	\
				 ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW |	\
				 ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH |	\
				 ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH |	\
				 ETHTOOL_COALESCE_PKT_RATE_RX_USECS)
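
/* Declaring supported_coalesce_params lets the ethtool core reject any
 * coalesce field outside this mask before .set_coalesce is called, so
 * the handlers above do not need to validate unsupported knobs
 * themselves.
 */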
static const struct ethtool_ops lio_ethtool_ops = {
	.supported_coalesce_params = LIO_ETHTOOL_COALESCE,
	.get_link_ksettings	= lio_get_link_ksettings,
	.set_link_ksettings	= lio_set_link_ksettings,
	.get_fecparam		= lio_get_fecparam,
	.set_fecparam		= lio_set_fecparam,
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= lio_get_drvinfo,
	.get_ringparam		= lio_ethtool_get_ringparam,
	.set_ringparam		= lio_ethtool_set_ringparam,
	.get_channels		= lio_ethtool_get_channels,
	.set_channels		= lio_ethtool_set_channels,
	.set_phys_id		= lio_set_phys_id,
	.get_eeprom_len		= lio_get_eeprom_len,
	.get_eeprom		= lio_get_eeprom,
	.get_strings		= lio_get_strings,
	.get_ethtool_stats	= lio_get_ethtool_stats,
	.get_pauseparam		= lio_get_pauseparam,
	.set_pauseparam		= lio_set_pauseparam,
	.get_regs_len		= lio_get_regs_len,
	.get_regs		= lio_get_regs,
	.get_msglevel		= lio_get_msglevel,
	.set_msglevel		= lio_set_msglevel,
	.get_sset_count		= lio_get_sset_count,
	.get_coalesce		= lio_get_intr_coalesce,
	.set_coalesce		= lio_set_intr_coalesce,
	.get_priv_flags		= lio_get_priv_flags,
	.set_priv_flags		= lio_set_priv_flags,
	.get_ts_info		= lio_get_ts_info,
};
static const struct ethtool_ops lio_vf_ethtool_ops = {
	.supported_coalesce_params = LIO_ETHTOOL_COALESCE,
	.get_link_ksettings	= lio_get_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= lio_get_vf_drvinfo,
	.get_ringparam		= lio_ethtool_get_ringparam,
	.set_ringparam		= lio_ethtool_set_ringparam,
	.get_channels		= lio_ethtool_get_channels,
	.set_channels		= lio_ethtool_set_channels,
	.get_strings		= lio_vf_get_strings,
	.get_ethtool_stats	= lio_vf_get_ethtool_stats,
	.get_regs_len		= lio_get_regs_len,
	.get_regs		= lio_get_regs,
	.get_msglevel		= lio_get_msglevel,
	.set_msglevel		= lio_vf_set_msglevel,
	.get_sset_count		= lio_vf_get_sset_count,
	.get_coalesce		= lio_get_intr_coalesce,
	.set_coalesce		= lio_set_intr_coalesce,
	.get_priv_flags		= lio_get_priv_flags,
	.set_priv_flags		= lio_set_priv_flags,
	.get_ts_info		= lio_get_ts_info,
};
void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	if (OCTEON_CN23XX_VF(oct))
		netdev->ethtool_ops = &lio_vf_ethtool_ops;
	else
		netdev->ethtool_ops = &lio_ethtool_ops;
}