/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/pci.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
struct oct_intrmod_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
	int status;
};

struct oct_intrmod_resp {
	struct oct_intrmod_cfg intrmod;
	u64 status;
};

struct oct_mdio_cmd_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
};

struct oct_mdio_cmd_resp {
	struct oct_mdio_cmd resp;
	u64 status;
};

#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
/* Octeon's interface mode of operation */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
};
#define OCT_ETHTOOL_REGDUMP_LEN		4096
#define OCT_ETHTOOL_REGDUMP_LEN_23XX	(4096 * 11)
#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF	(4096 * 2)
#define OCT_ETHTOOL_REGSVER		1
/* statistics of PF */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"mac_tx_total_bytes",
	"mac_tx_ctl_packets",
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collision",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferal_fail",
	"rx_lro_aborts_port",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
	"mac_rx_ctl_packets",
	"link_state_changes",
};
/* statistics of VF */
static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
	"link_state_changes",
};
/* statistics of host tx queue */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"fw_instr_processed",
};
/* statistics of host rx queue */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"fw_dropped_nodispatch",
	"buffer_alloc_failure",
};
/* LiquidIO driver private flags */
static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
};

#define OCTNIC_NCMD_AUTONEG_ON  0x1
#define OCTNIC_NCMD_PHY_ON      0x2
static int lio_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;

	linfo = &lio->linfo;

	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);

	switch (linfo->link.s.phy_type) {
	case LIO_PHY_PORT_TP:
		ecmd->base.port = PORT_TP;
		ecmd->base.autoneg = AUTONEG_DISABLE;
		ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
		ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
		ethtool_link_ksettings_add_link_mode(ecmd, supported,
						     10000baseT_Full);

		ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ecmd, advertising,
						     10000baseT_Full);
		break;

	case LIO_PHY_PORT_FIBRE:
		if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
		    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
		    linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
		    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
			dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
		} else {
			dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
				linfo->link.s.if_mode);
		}

		ecmd->base.port = PORT_FIBRE;
		ecmd->base.autoneg = AUTONEG_DISABLE;
		ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);

		ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
		ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
		if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
		    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
			if (OCTEON_CN23XX_PF(oct)) {
				ethtool_link_ksettings_add_link_mode
					(ecmd, supported, 25000baseSR_Full);
				ethtool_link_ksettings_add_link_mode
					(ecmd, supported, 25000baseKR_Full);
				ethtool_link_ksettings_add_link_mode
					(ecmd, supported, 25000baseCR_Full);

				if (oct->no_speed_setting == 0) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseCR_Full);
				}

				if (oct->no_speed_setting == 0)
					liquidio_get_speed(lio);
				else
					oct->speed_setting = 25;

				if (oct->speed_setting == 10) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseCR_Full);
				}
				if (oct->speed_setting == 25) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseCR_Full);
				}
			} else { /* VF */
				if (linfo->link.s.speed == 10000) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseCR_Full);

					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseCR_Full);
				}

				if (linfo->link.s.speed == 25000) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 25000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 25000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 25000baseCR_Full);

					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseCR_Full);
				}
			}
		} else {
			ethtool_link_ksettings_add_link_mode(ecmd, supported,
							     10000baseT_Full);
			ethtool_link_ksettings_add_link_mode(ecmd, advertising,
							     10000baseT_Full);
		}
		break;
	}

	if (linfo->link.s.link_up) {
		ecmd->base.speed = linfo->link.s.speed;
		ecmd->base.duplex = linfo->link.s.duplex;
	} else {
		ecmd->base.speed = SPEED_UNKNOWN;
		ecmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
static int lio_set_link_ksettings(struct net_device *netdev,
				  const struct ethtool_link_ksettings *ecmd)
{
	const int speed = ecmd->base.speed;
	struct lio *lio = GET_LIO(netdev);
	struct oct_link_info *linfo;
	struct octeon_device *oct;

	oct = lio->oct_dev;
	linfo = &lio->linfo;

	if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
	      oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
		return -EOPNOTSUPP;

	if (oct->no_speed_setting) {
		dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
			__func__);
		return -EOPNOTSUPP;
	}

	if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
	     ecmd->base.duplex != linfo->link.s.duplex) ||
	    ecmd->base.autoneg != AUTONEG_DISABLE ||
	    (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
	     ecmd->base.speed != SPEED_UNKNOWN))
		return -EOPNOTSUPP;

	if ((oct->speed_boot == speed / 1000) &&
	    oct->speed_boot == oct->speed_setting)
		return 0;

	liquidio_set_speed(lio, speed / 1000);

	dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
		oct->speed_setting);

	return 0;
}
static void
lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct lio *lio;
	struct octeon_device *oct;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio");
	strcpy(drvinfo->version, LIQUIDIO_VERSION);
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}
static void
lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct octeon_device *oct;
	struct lio *lio;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio_vf");
	strcpy(drvinfo->version, LIQUIDIO_VERSION);
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}
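/*
 * For context, handlers such as the two drvinfo callbacks above reach
 * userspace by being plugged into a struct ethtool_ops that is attached to
 * the netdev at probe time.  A minimal wiring sketch (field names as in
 * <linux/ethtool.h>; the ops-struct name here is illustrative):
 *
 *	static const struct ethtool_ops lio_ethtool_ops = {
 *		.get_link_ksettings	= lio_get_link_ksettings,
 *		.set_link_ksettings	= lio_set_link_ksettings,
 *		.get_drvinfo		= lio_get_drvinfo,
 *		...
 *	};
 *
 *	netdev->ethtool_ops = &lio_ethtool_ops;
 *
 * With that in place, `ethtool -i <ifname>` reports the driver, version,
 * firmware and bus strings filled in above.
 */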
static int
lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
	nctrl.ncmd.s.param1 = num_queues;
	nctrl.ncmd.s.param2 = num_queues;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
			ret);
		return -1;
	}

	return 0;
}
static void
lio_ethtool_get_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
	u32 combined_count = 0, max_combined = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct)) {
		if (oct->sriov_info.sriov_enabled) {
			max_combined = lio->linfo.num_txpciq;
		} else {
			struct octeon_config *conf23_pf =
				CHIP_CONF(oct, cn23xx_pf);

			max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
		}
		combined_count = oct->num_iqs;
	} else if (OCTEON_CN23XX_VF(oct)) {
		u64 reg_val = 0ULL;
		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);

		reg_val = octeon_read_csr64(oct, ctrl);
		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
		combined_count = oct->num_iqs;
	}

	channel->max_rx = max_rx;
	channel->max_tx = max_tx;
	channel->max_combined = max_combined;
	channel->rx_count = rx_count;
	channel->tx_count = tx_count;
	channel->combined_count = combined_count;
}
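/*
 * Usage sketch: `ethtool -l eth0` reads these limits and counts through
 * lio_ethtool_get_channels() ("eth0" is a placeholder interface name).
 * Note that for the CN23XX VF the maximum is not taken from a config
 * struct but read back from the SLI_IQ_PKT_CONTROL64 CSR (the
 * rings-per-VF field), as above.
 */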
static int
lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
{
	struct msix_entry *msix_entries;
	int num_msix_irqs = 0;
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon.
	 */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	if (OCTEON_CN23XX_PF(oct))
		num_msix_irqs = oct->num_msix_irqs - 1;
	else if (OCTEON_CN23XX_VF(oct))
		num_msix_irqs = oct->num_msix_irqs;

	msix_entries = (struct msix_entry *)oct->msix_entries;
	for (i = 0; i < num_msix_irqs; i++) {
		if (oct->ioq_vector[i].vector) {
			/* clear the affinity_cpumask */
			irq_set_affinity_hint(msix_entries[i].vector,
					      NULL);
			free_irq(msix_entries[i].vector,
				 &oct->ioq_vector[i]);
			oct->ioq_vector[i].vector = 0;
		}
	}

	/* non-iov vector's argument is oct struct */
	if (OCTEON_CN23XX_PF(oct))
		free_irq(msix_entries[i].vector, oct);

	pci_disable_msix(oct->pci_dev);
	kfree(oct->msix_entries);
	oct->msix_entries = NULL;

	kfree(oct->irq_name_storage);
	oct->irq_name_storage = NULL;

	if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
		dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
		return -1;
	}

	if (octeon_setup_interrupt(oct, num_ioqs)) {
		dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
		return -1;
	}

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return 0;
}
static int
lio_ethtool_set_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	u32 combined_count, max_combined;
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	int stopped = 0;

	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
		dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
		return -EINVAL;
	}

	if (!channel->combined_count || channel->other_count ||
	    channel->rx_count || channel->tx_count)
		return -EINVAL;

	combined_count = channel->combined_count;

	if (OCTEON_CN23XX_PF(oct)) {
		if (oct->sriov_info.sriov_enabled) {
			max_combined = lio->linfo.num_txpciq;
		} else {
			struct octeon_config *conf23_pf =
				CHIP_CONF(oct, cn23xx_pf);

			max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
		}
	} else if (OCTEON_CN23XX_VF(oct)) {
		u64 reg_val = 0ULL;
		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);

		reg_val = octeon_read_csr64(oct, ctrl);
		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
	} else {
		return -EINVAL;
	}

	if (combined_count > max_combined || combined_count < 1)
		return -EINVAL;

	if (combined_count == oct->num_iqs)
		return 0;

	ifstate_set(lio, LIO_IFSTATE_RESETTING);

	if (netif_running(dev)) {
		dev->netdev_ops->ndo_stop(dev);
		stopped = 1;
	}

	if (lio_reset_queues(dev, combined_count))
		return -EINVAL;

	if (stopped)
		dev->netdev_ops->ndo_open(dev);

	ifstate_reset(lio, LIO_IFSTATE_RESETTING);

	return 0;
}
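/*
 * Usage sketch: `ethtool -L eth0 combined 4` lands in
 * lio_ethtool_set_channels() ("eth0" and the count are placeholders).
 * Only the "combined" parameter may be set; separate rx/tx/other counts
 * are rejected above, and the interface is briefly stopped and reopened
 * around lio_reset_queues().
 */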
static int lio_get_eeprom_len(struct net_device *netdev)
{
	u8 buf[192];
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;
	int len;

	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
		      board_info->name, board_info->serial_number,
		      board_info->major, board_info->minor);

	return len;
}
static int
lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;

	if (eeprom->offset)
		return -EINVAL;

	eeprom->magic = oct_dev->pci_dev->vendor;
	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	sprintf((char *)bytes,
		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
		board_info->name, board_info->serial_number,
		board_info->major, board_info->minor);

	return 0;
}
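/*
 * Usage sketch: `ethtool -e eth0` dumps this pseudo-EEPROM, which is just
 * the formatted board name/serial/revision string built above ("eth0" is
 * a placeholder interface name).
 */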
static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
	nctrl.ncmd.s.param1 = addr;
	nctrl.ncmd.s.param2 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
		return -EINVAL;
	}

	return 0;
}
static int octnet_id_active(struct net_device *netdev, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
	nctrl.ncmd.s.param1 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
		return -EINVAL;
	}

	return 0;
}
/* Callback for when mdio command response arrives */
static void octnet_mdio_resp_callback(struct octeon_device *oct,
				      u32 status, void *buf)
{
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;

	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
	if (status) {
		dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		WRITE_ONCE(mdio_cmd_ctx->cond, -1);
	} else {
		WRITE_ONCE(mdio_cmd_ctx->cond, 1);
	}
	wake_up_interruptible(&mdio_cmd_ctx->wc);
}
/* This routine provides PHY access routines for MDIO clause 45 devices. */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	WRITE_ONCE(mdio_cmd_ctx->cond, 0);
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}
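/*
 * The MDIO access above is another instance of the soft-command pattern
 * used throughout this file: allocate a soft command with request,
 * response and context areas, byte-swap the 8-byte words of the request,
 * send it, then sleep on the context's wait queue until the completion
 * callback (octnet_mdio_resp_callback() here) flips ctx->cond and wakes
 * the sleeper.  The cond values form a tiny protocol: 0 = pending,
 * 1 = success, -1 = firmware reported failure.
 */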
static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;
	int value, ret;
	u32 cur_ver;

	linfo = &lio->linfo;
	cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
			     oct->fw_info.ver.min,
			     oct->fw_info.ver.rev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &value);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
			if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
			    cur_ver > OCT_FW_VER(1, 7, 2))
				return 2;
			else
				return 0;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
		    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
		    cur_ver > OCT_FW_VER(1, 7, 2))
			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
		else if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);
		else
			return -EINVAL;
		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
		    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
		    cur_ver > OCT_FW_VER(1, 7, 2))
			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
		else if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else
			return -EINVAL;
		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
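/*
 * Usage sketch: `ethtool -p eth0 10` blinks the port LED for 10 seconds
 * through this handler ("eth0" is a placeholder).  Returning a positive
 * value from ETHTOOL_ID_ACTIVE asks the ethtool core to drive the blinking
 * itself at that many blinks per second via ETHTOOL_ID_ON/ETHTOOL_ID_OFF
 * callbacks; returning 0 means the device blinks autonomously until
 * ETHTOOL_ID_INACTIVE.
 */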
static void
lio_ethtool_get_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ering)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
	    rx_pending = 0;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
		rx_pending = oct->droq[0]->max_count;
		tx_pending = oct->instr_queue[0]->max_count;
	}

	ering->tx_pending = tx_pending;
	ering->tx_max_pending = tx_max_pending;
	ering->rx_pending = rx_pending;
	ering->rx_max_pending = rx_max_pending;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
}
static int lio_23xx_reconfigure_queue_count(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	struct liquidio_if_cfg_context *ctx;
	u32 resp_size, ctx_size, data_size;
	struct liquidio_if_cfg_resp *resp;
	struct octeon_soft_command *sc;
	union oct_nic_if_cfg if_cfg;
	struct lio_version *vdata;
	u32 ifidx_or_pfnum;
	int retval;
	int j;

	resp_size = sizeof(struct liquidio_if_cfg_resp);
	ctx_size = sizeof(struct liquidio_if_cfg_context);
	data_size = sizeof(struct lio_version);
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, data_size,
					  resp_size, ctx_size);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
			__func__);
		return -1;
	}

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
	vdata = (struct lio_version *)sc->virtdptr;

	vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
	vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
	vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

	ifidx_or_pfnum = oct->pf_num;
	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct);
	init_waitqueue_head(&ctx->wc);

	if_cfg.u64 = 0;
	if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
	if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
	if_cfg.s.base_queue = oct->sriov_info.pf_srn;
	if_cfg.s.gmx_port_id = oct->pf_num;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_QCOUNT_UPDATE, 0,
				    if_cfg.u64, 0);
	sc->callback = lio_if_cfg_callback;
	sc->callback_arg = sc;
	sc->wait_time = LIO_IFCFG_WAIT_TIME;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"iq/oq config failed status: %x\n",
			retval);
		goto qcount_update_fail;
	}

	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
		dev_err(&oct->pci_dev->dev, "Wait interrupted\n");
		return -1;
	}

	retval = resp->status;
	if (retval) {
		dev_err(&oct->pci_dev->dev, "iq/oq config failed\n");
		goto qcount_update_fail;
	}

	octeon_swap_8B_data((u64 *)(&resp->cfg_info),
			    (sizeof(struct liquidio_if_cfg_info)) >> 3);

	lio->ifidx = ifidx_or_pfnum;
	lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
	lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
	for (j = 0; j < lio->linfo.num_rxpciq; j++) {
		lio->linfo.rxpciq[j].u64 =
			resp->cfg_info.linfo.rxpciq[j].u64;
	}

	for (j = 0; j < lio->linfo.num_txpciq; j++) {
		lio->linfo.txpciq[j].u64 =
			resp->cfg_info.linfo.txpciq[j].u64;
	}

	lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
	lio->txq = lio->linfo.txpciq[0].s.q_no;
	lio->rxq = lio->linfo.rxpciq[0].s.q_no;

	octeon_free_soft_command(oct, sc);
	dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
		 lio->linfo.num_rxpciq);

	return 0;

qcount_update_fail:
	octeon_free_soft_command(oct, sc);

	return -1;
}
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int i, queue_count_update = 0;
	struct napi_struct *napi, *n;
	int ret;

	schedule_timeout_uninterruptible(msecs_to_jiffies(100));

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	if (lio_wait_for_instr_fetch(oct))
		dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

	if (octeon_set_io_queues_off(oct)) {
		dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
		return -1;
	}

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon.
	 */
	oct->fn_list.disable_io_queues(oct);

	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (num_qs != oct->num_iqs) {
		ret = netif_set_real_num_rx_queues(netdev, num_qs);
		if (ret) {
			dev_err(&oct->pci_dev->dev,
				"Setting real number rx failed\n");
			return ret;
		}

		ret = netif_set_real_num_tx_queues(netdev, num_qs);
		if (ret) {
			dev_err(&oct->pci_dev->dev,
				"Setting real number tx failed\n");
			return ret;
		}

		/* The value of queue_count_update decides whether it is the
		 * queue count or the descriptor count that is being
		 * re-configured.
		 */
		queue_count_update = 1;
	}

	/* Re-configuration of queues can happen in two scenarios, SRIOV enabled
	 * and SRIOV disabled. Few things like recreating queue zero, resetting
	 * glists and IRQs are required for both. For the latter, some more
	 * steps like updating sriov_info for the octeon device need to be done.
	 */
	if (queue_count_update) {
		lio_delete_glists(lio);

		/* Delete mbox for PF which is SRIOV disabled because sriov_info
		 * will be now changed.
		 */
		if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
			oct->fn_list.free_mbox(oct);
	}

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & BIT_ULL(i)))
			continue;
		octeon_delete_droq(oct, i);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		octeon_delete_instr_queue(oct, i);
	}

	if (queue_count_update) {
		/* For PF re-configure sriov related information */
		if ((OCTEON_CN23XX_PF(oct)) &&
		    !oct->sriov_info.sriov_enabled) {
			oct->sriov_info.num_pf_rings = num_qs;
			if (cn23xx_sriov_config(oct)) {
				dev_err(&oct->pci_dev->dev,
					"Queue reset aborted: SRIOV config failed\n");
				return -1;
			}

			num_qs = oct->sriov_info.num_pf_rings;
		}
	}

	if (oct->fn_list.setup_device_regs(oct)) {
		dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
		return -1;
	}

	/* The following are needed in case of queue count re-configuration and
	 * not for descriptor count re-configuration.
	 */
	if (queue_count_update) {
		if (octeon_setup_instr_queues(oct))
			return -1;

		if (octeon_setup_output_queues(oct))
			return -1;

		/* Recreating mbox for PF that is SRIOV disabled */
		if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
			if (oct->fn_list.setup_mbox(oct)) {
				dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
				return -1;
			}
		}

		/* Deleting and recreating IRQs whether the interface is SRIOV
		 * enabled or disabled.
		 */
		if (lio_irq_reallocate_irqs(oct, num_qs)) {
			dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
			return -1;
		}

		/* Enable the input and output queues for this Octeon device */
		if (oct->fn_list.enable_io_queues(oct)) {
			dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
			return -1;
		}

		for (i = 0; i < oct->num_oqs; i++)
			writel(oct->droq[i]->max_count,
			       oct->droq[i]->pkts_credit_reg);

		/* Informing firmware about the new queue count. It is required
		 * for firmware to allocate more number of queues than those at
		 * load time.
		 */
		if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
			if (lio_23xx_reconfigure_queue_count(lio))
				return -1;
		}
	}

	/* Once firmware is aware of the new value, queues can be recreated */
	if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
		dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
		return -1;
	}

	if (queue_count_update) {
		if (lio_setup_glists(oct, lio, num_qs)) {
			dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
			return -1;
		}

		/* Send firmware the information about new number of queues
		 * if the interface is a VF or a PF that is SRIOV enabled.
		 */
		if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
			if (lio_send_queue_count_update(netdev, num_qs))
				return -1;
	}

	return 0;
}
static int lio_ethtool_set_ringparam(struct net_device *netdev,
				     struct ethtool_ringparam *ering)
{
	u32 rx_count, tx_count, rx_count_old, tx_count_old;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int stopped = 0;

	if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
		return -EINVAL;

	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
		return -EINVAL;

	rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
			   CN23XX_MAX_OQ_DESCRIPTORS);
	tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
			   CN23XX_MAX_IQ_DESCRIPTORS);

	rx_count_old = oct->droq[0]->max_count;
	tx_count_old = oct->instr_queue[0]->max_count;

	if (rx_count == rx_count_old && tx_count == tx_count_old)
		return 0;

	ifstate_set(lio, LIO_IFSTATE_RESETTING);

	if (netif_running(netdev)) {
		netdev->netdev_ops->ndo_stop(netdev);
		stopped = 1;
	}

	/* Change RX/TX DESCS count */
	if (tx_count != tx_count_old)
		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    tx_count);
	if (rx_count != rx_count_old)
		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    rx_count);

	if (lio_reset_queues(netdev, oct->num_iqs))
		goto err_lio_reset_queues;

	if (stopped)
		netdev->netdev_ops->ndo_open(netdev);

	ifstate_reset(lio, LIO_IFSTATE_RESETTING);

	return 0;

err_lio_reset_queues:
	if (tx_count != tx_count_old)
		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    tx_count_old);
	if (rx_count != rx_count_old)
		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    rx_count_old);
	return -EINVAL;
}
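/*
 * Usage sketch: `ethtool -g eth0` reads the ring sizes and
 * `ethtool -G eth0 rx 2048 tx 1024` resizes them ("eth0" and the counts
 * are placeholders).  Requested values are clamped to the CN23XX min/max
 * descriptor limits above, and on failure the old descriptor counts are
 * restored before returning.
 */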
static u32 lio_get_msglevel(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->msg_enable;
}

static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
		if (msglvl & NETIF_MSG_HW)
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);
		else
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_DISABLE, 0);
	}

	lio->msg_enable = msglvl;
}

static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	lio->msg_enable = msglvl;
}
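/*
 * Usage sketch: toggling the NETIF_MSG_HW bit (0x2000) in the message
 * level, e.g. `ethtool -s eth0 msglvl 0x2000`, also flips the firmware's
 * verbose mode through the OCTNET_CMD_VERBOSE_* commands above; changes
 * to other bits only update lio->msg_enable.  "eth0" and the mask value
 * are placeholders.
 */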
static void
lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers. Just report pause frame support.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	pause->autoneg = 0;

	pause->tx_pause = oct->tx_pause;
	pause->rx_pause = oct->rx_pause;
}
static int
lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct oct_link_info *linfo = &lio->linfo;
	int ret = 0;

	if (oct->chip_id != OCTEON_CN23XX_PF_VID)
		return -EINVAL;

	if (linfo->link.s.duplex == 0) {
		/* no flow control for half duplex */
		if (pause->rx_pause || pause->tx_pause)
			return -EINVAL;
	}

	/* do not support autoneg of link flow control */
	if (pause->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	if (pause->rx_pause) {
		/* enable rx pause */
		nctrl.ncmd.s.param1 = 1;
	} else {
		/* disable rx pause */
		nctrl.ncmd.s.param1 = 0;
	}

	if (pause->tx_pause) {
		/* enable tx pause */
		nctrl.ncmd.s.param2 = 1;
	} else {
		/* disable tx pause */
		nctrl.ncmd.s.param2 = 0;
	}

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
		return -EINVAL;
	}

	oct->rx_pause = pause->rx_pause;
	oct->tx_pause = pause->tx_pause;

	return 0;
}
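/*
 * Usage sketch: `ethtool -a eth0` reports and `ethtool -A eth0 rx on tx off`
 * programs these flow-control settings ("eth0" is a placeholder).  In the
 * OCTNET_CMD_SET_FLOW_CTL control command built above, param1 carries the
 * rx-pause enable and param2 the tx-pause enable.
 */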
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct rtnl_link_stats64 lstats;
	int i = 0, j;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = lstats.rx_packets;
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = lstats.tx_packets;
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = lstats.rx_bytes;
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = lstats.tx_bytes;
	data[i++] = lstats.rx_errors +
		    oct_dev->link_stats.fromwire.fcs_err +
		    oct_dev->link_stats.fromwire.jabber_err +
		    oct_dev->link_stats.fromwire.l2_err +
		    oct_dev->link_stats.fromwire.frame_err;
	data[i++] = lstats.tx_errors;
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * oct->droq[oq_no]->stats->dropped_nodispatch +
	 * oct->droq[oq_no]->stats->dropped_toomany +
	 * oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = lstats.rx_dropped +
		    oct_dev->link_stats.fromwire.fifo_err +
		    oct_dev->link_stats.fromwire.dmac_drop +
		    oct_dev->link_stats.fromwire.red_drops +
		    oct_dev->link_stats.fromwire.fw_err_pko +
		    oct_dev->link_stats.fromwire.fw_err_link +
		    oct_dev->link_stats.fromwire.fw_err_drop;
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = lstats.tx_dropped +
		    oct_dev->link_stats.fromhost.max_collision_fail +
		    oct_dev->link_stats.fromhost.max_deferral_fail +
		    oct_dev->link_stats.fromhost.total_collisions +
		    oct_dev->link_stats.fromhost.fw_err_pko +
		    oct_dev->link_stats.fromhost.fw_err_link +
		    oct_dev->link_stats.fromhost.fw_err_drop +
		    oct_dev->link_stats.fromhost.fw_err_pki;

	/* firmware tx stats */
	/* per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 * fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/* per_core_stats[i].link_stats[port].fromhost.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_tx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* Multicast packets sent by this port */
	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;

	/* mac tx statistics */
	/* CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/* CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/* CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/* CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/* CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/* CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/* Multicast packets received on this port */
	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/* per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_rx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_rx_vxlan_err
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/* per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/* per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_timer
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/* intrmod: packet forward rate */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/* CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/* CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/* CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
			continue;
		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions that could not be processed */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);

		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);

		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
			continue;

		/* packets send to TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
static void lio_vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats
				     __attribute__((unused)),
				     u64 *data)
{
	struct rtnl_link_stats64 lstats;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j, vj;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = lstats.rx_packets;
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = lstats.tx_packets;
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = lstats.rx_bytes;
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = lstats.tx_bytes;
	data[i++] = lstats.rx_errors;
	data[i++] = lstats.tx_errors;
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * oct->droq[oq_no]->stats->dropped_nodispatch +
	 * oct->droq[oq_no]->stats->dropped_toomany +
	 * oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = lstats.rx_dropped;
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = lstats.tx_dropped;

	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;

	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	for (vj = 0; vj < oct_dev->num_iqs; vj++) {
		j = lio->linfo.txpciq[vj].s.q_no;

		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions that could not be processed */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.bytes_sent);

		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);

		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);

		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_restart);
	}

	for (vj = 0; vj < oct_dev->num_oqs; vj++) {
		j = lio->linfo.rxpciq[vj].s.q_no;

		/* packets send to TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_bytes_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
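/*
 * Usage sketch: `ethtool -S eth0` pairs the u64 array filled in by the two
 * handlers above with the string tables emitted by lio_get_strings() and
 * lio_vf_get_strings() below; the per-queue entries are named
 * "tx-<q>-<stat>" and "rx-<q>-<stat>" ("eth0" is a placeholder).  The fill
 * order here must match the string order exactly, and the sset-count
 * handlers must report the same totals, or ethtool output will be
 * misaligned.
 */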
static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	int i;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
			sprintf(data, "%s", oct_priv_flags_strings[i]);
			data += ETH_GSTRING_LEN;
		}
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		break;
	}
}
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}
static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
			       u8 *data)
{
	int num_iq_stats, num_oq_stats, i, j;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_vf_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_vf_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}
static int lio_get_priv_flags_ss_count(struct lio *lio)
{
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		return ARRAY_SIZE(oct_priv_flags_strings);
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EOPNOTSUPP;
	}
}
static int lio_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}
static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_vf_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}
/* Callback function for intrmod */
static void octnet_intrmod_callback(struct octeon_device *oct_dev,
				    u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_intrmod_context *ctx;

	ctx = (struct oct_intrmod_context *)sc->ctxptr;

	ctx->status = status;

	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}
/* get interrupt moderation parameters */
static int octnet_get_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_context *ctx;
	struct oct_intrmod_resp *resp;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_intrmod_resp),
					  sizeof(struct oct_intrmod_context));

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_intrmod_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_intrmod_resp));

	ctx = (struct oct_intrmod_context *)sc->ctxptr;
	memset(ctx, 0, sizeof(struct oct_intrmod_context));
	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct_dev);
	init_waitqueue_head(&ctx->wc);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
		dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
		goto intrmod_info_wait_intr;
	}

	retval = ctx->status || resp->status;
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev,
			"Get interrupt moderation parameters failed\n");
		goto intrmod_info_wait_fail;
	}

	octeon_swap_8B_data((u64 *)&resp->intrmod,
			    (sizeof(struct oct_intrmod_cfg)) / 8);
	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
	octeon_free_soft_command(oct_dev, sc);

	return 0;

intrmod_info_wait_fail:

	octeon_free_soft_command(oct_dev, sc);

intrmod_info_wait_intr:

	return -ENODEV;
}
/* Configure interrupt moderation parameters */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_context *ctx;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  0,
					  sizeof(struct oct_intrmod_context));

	if (!sc)
		return -ENOMEM;

	ctx = (struct oct_intrmod_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct_dev);
	init_waitqueue_head(&ctx->wc);

	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
		retval = ctx->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev,
				"intrmod config failed. Status: %llx\n",
				CVM_CAST64(retval));
		} else {
			dev_info(&oct_dev->pci_dev->dev,
				 "Rx-Adaptive Interrupt moderation %s\n",
				 (intr_cfg->rx_enable) ?
				 "enabled" : "disabled");
		}

		octeon_free_soft_command(oct_dev, sc);

		return ((retval) ? -ENODEV : 0);
	}

	dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n");

	return -EINTR;
}

static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg intrmod_cfg;

	if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
		return -ENODEV;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID: {
		if (!intrmod_cfg.rx_enable) {
			intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
			intr_coal->rx_max_coalesced_frames =
				oct->rx_max_coalesced_frames;
		}
		if (!intrmod_cfg.tx_enable)
			intr_coal->tx_max_coalesced_frames =
				oct->tx_max_coalesced_frames;
		break;
	}
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intrmod_cfg.rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}
		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	if (intrmod_cfg.rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg.rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg.check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg.maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg.minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg.rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg.rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg.rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg.rx_mincnt_trigger;
	}
	if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
	    (intrmod_cfg.tx_enable)) {
		intr_coal->use_adaptive_tx_coalesce =
			intrmod_cfg.tx_enable;
		intr_coal->tx_max_coalesced_frames_high =
			intrmod_cfg.tx_maxcnt_trigger;
		intr_coal->tx_max_coalesced_frames_low =
			intrmod_cfg.tx_mincnt_trigger;
	}
	return 0;
}

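/*
 * The fields filled in above are what user space sees from
 * "ethtool -c <iface>": rx-usecs, rx-frames, adaptive-rx/adaptive-tx and
 * the high/low trigger values.
 */
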
/* Enable/Disable auto interrupt Moderation */
static int oct_cfg_adaptive_intr(struct lio *lio,
				 struct oct_intrmod_cfg *intrmod_cfg,
				 struct ethtool_coalesce *intr_coal)
{
	int ret = 0;

	if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
		intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
		intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
		intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
	}
	if (intrmod_cfg->rx_enable) {
		intrmod_cfg->rx_maxcnt_trigger =
			intr_coal->rx_max_coalesced_frames_high;
		intrmod_cfg->rx_maxtmr_trigger =
			intr_coal->rx_coalesce_usecs_high;
		intrmod_cfg->rx_mintmr_trigger =
			intr_coal->rx_coalesce_usecs_low;
		intrmod_cfg->rx_mincnt_trigger =
			intr_coal->rx_max_coalesced_frames_low;
	}
	if (intrmod_cfg->tx_enable) {
		intrmod_cfg->tx_maxcnt_trigger =
			intr_coal->tx_max_coalesced_frames_high;
		intrmod_cfg->tx_mincnt_trigger =
			intr_coal->tx_max_coalesced_frames_low;
	}

	ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);

	return ret;
}

static int
oct_cfg_rx_intrcnt(struct lio *lio,
		   struct oct_intrmod_cfg *intrmod,
		   struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 rx_max_coalesced_frames;

	/* Config Cnt based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intr_coal->rx_max_coalesced_frames)
			rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
		else
			rx_max_coalesced_frames =
				intr_coal->rx_max_coalesced_frames;
		octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
				 rx_max_coalesced_frames);
		CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
		break;
	}
	case OCTEON_CN23XX_PF_VID: {
		int q_no;

		if (!intr_coal->rx_max_coalesced_frames)
			rx_max_coalesced_frames = intrmod->rx_frames;
		else
			rx_max_coalesced_frames =
				intr_coal->rx_max_coalesced_frames;
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			q_no += oct->sriov_info.pf_srn;
			octeon_write_csr64(
				oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
				(octeon_read_csr64(
					oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
				 (0x3fffff00000000UL)) |
				(rx_max_coalesced_frames - 1));
			/*consider setting resend bit*/
		}
		intrmod->rx_frames = rx_max_coalesced_frames;
		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
		break;
	}
	case OCTEON_CN23XX_VF_VID: {
		int q_no;

		if (!intr_coal->rx_max_coalesced_frames)
			rx_max_coalesced_frames = intrmod->rx_frames;
		else
			rx_max_coalesced_frames =
				intr_coal->rx_max_coalesced_frames;
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			octeon_write_csr64(
				oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
				(octeon_read_csr64(
					oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
				 (0x3fffff00000000UL)) |
				(rx_max_coalesced_frames - 1));
			/*consider writing to resend bit here*/
		}
		intrmod->rx_frames = rx_max_coalesced_frames;
		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

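/*
 * On CN23XX the read-modify-write above keeps the time-threshold bits of
 * the INT_LEVELS register (those covered by 0x3fffff00000000UL) intact and
 * replaces only the packet-count threshold in the low word; note that the
 * value programmed is "frames - 1".
 */
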
static int oct_cfg_rx_intrtime(struct lio *lio,
			       struct oct_intrmod_cfg *intrmod,
			       struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 time_threshold, rx_coalesce_usecs;

	/* Config Time based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intr_coal->rx_coalesce_usecs)
			rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
		else
			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;

		time_threshold = lio_cn6xxx_get_oq_ticks(oct,
							 (u32)rx_coalesce_usecs);
		octeon_write_csr(oct,
				 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
				 time_threshold);

		CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
		break;
	}
	case OCTEON_CN23XX_PF_VID: {
		int q_no;

		if (!intr_coal->rx_coalesce_usecs)
			rx_coalesce_usecs = intrmod->rx_usecs;
		else
			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
		time_threshold =
			cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			q_no += oct->sriov_info.pf_srn;
			octeon_write_csr64(oct,
					   CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
					   (intrmod->rx_frames |
					    ((u64)time_threshold << 32)));
			/*consider writing to resend bit here*/
		}
		intrmod->rx_usecs = rx_coalesce_usecs;
		oct->rx_coalesce_usecs = rx_coalesce_usecs;
		break;
	}
	case OCTEON_CN23XX_VF_VID: {
		int q_no;

		if (!intr_coal->rx_coalesce_usecs)
			rx_coalesce_usecs = intrmod->rx_usecs;
		else
			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;

		time_threshold =
			cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			octeon_write_csr64(
				oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
				(intrmod->rx_frames |
				 ((u64)time_threshold << 32)));
			/*consider setting resend bit*/
		}
		intrmod->rx_usecs = rx_coalesce_usecs;
		oct->rx_coalesce_usecs = rx_coalesce_usecs;
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}

static int
oct_cfg_tx_intrcnt(struct lio *lio,
		   struct oct_intrmod_cfg *intrmod,
		   struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 iq_intr_pkt;
	void __iomem *inst_cnt_reg;
	u64 val;

	/* Config Cnt based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		break;
	case OCTEON_CN23XX_VF_VID:
	case OCTEON_CN23XX_PF_VID: {
		int q_no;

		if (!intr_coal->tx_max_coalesced_frames)
			iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
				      CN23XX_PKT_IN_DONE_WMARK_MASK;
		else
			iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
				      CN23XX_PKT_IN_DONE_WMARK_MASK;
		for (q_no = 0; q_no < oct->num_iqs; q_no++) {
			inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
			val = readq(inst_cnt_reg);
			/* clear wmark and count; don't want to write the
			 * count back
			 */
			val = (val & 0xFFFF000000000000ULL) |
			      ((u64)(iq_intr_pkt - 1)
			       << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
			writeq(val, inst_cnt_reg);
			/*consider setting resend bit*/
		}
		intrmod->tx_frames = iq_intr_pkt;
		oct->tx_max_coalesced_frames = iq_intr_pkt;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

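/*
 * Illustrative example, assuming CN23XX_PKT_IN_DONE_WMARK_BIT_POS is 32:
 * "ethtool -C ethX tx-frames 16" makes iq_intr_pkt 16, so 15 is written
 * into the watermark field, the stale instruction count in the low bits
 * is cleared, and only the top 16 bits of the register are preserved.
 */
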
static int lio_set_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	int ret;
	struct octeon_device *oct = lio->oct_dev;
	struct oct_intrmod_cfg intrmod = {0};
	u32 j, q_no;
	int db_max, db_min;

	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		db_min = CN6XXX_DB_MIN;
		db_max = CN6XXX_DB_MAX;
		if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
		    (intr_coal->tx_max_coalesced_frames <= db_max)) {
			for (j = 0; j < lio->linfo.num_txpciq; j++) {
				q_no = lio->linfo.txpciq[j].s.q_no;
				oct->instr_queue[q_no]->fill_threshold =
					intr_coal->tx_max_coalesced_frames;
			}
		} else {
			dev_err(&oct->pci_dev->dev,
				"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
				intr_coal->tx_max_coalesced_frames,
				db_min, db_max);
			return -EINVAL;
		}
		break;
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		break;
	default:
		return -EINVAL;
	}

	intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
	intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
	intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
	intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));

	ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);

	if (!intr_coal->use_adaptive_rx_coalesce) {
		ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
		if (ret)
			goto ret_intrmod;

		ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
		if (ret)
			goto ret_intrmod;
	} else {
		oct->rx_coalesce_usecs =
			CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
		oct->rx_max_coalesced_frames =
			CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	}

	if (!intr_coal->use_adaptive_tx_coalesce) {
		ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
		if (ret)
			goto ret_intrmod;
	} else {
		oct->tx_max_coalesced_frames =
			CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
	}

	return 0;
ret_intrmod:
	return ret;
}

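/*
 * Typical invocations that land here, for illustration:
 *   ethtool -C ethX adaptive-rx on adaptive-tx on
 *   ethtool -C ethX rx-usecs 64 rx-frames 64 tx-frames 64
 * Fixed thresholds are only programmed when the corresponding adaptive
 * mode is off.
 */
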
static int lio_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	struct lio *lio = GET_LIO(netdev);

	info->so_timestamping =
#ifdef PTP_HARDWARE_TIMESTAMPING
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
#endif
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;

	if (lio->ptp_clock)
		info->phc_index = ptp_clock_index(lio->ptp_clock);
	else
		info->phc_index = -1;

#ifdef PTP_HARDWARE_TIMESTAMPING
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
#endif

	return 0;
}

/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
		return OCT_ETHTOOL_REGDUMP_LEN_23XX;
	case OCTEON_CN23XX_VF_VID:
		return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
	default:
		return OCT_ETHTOOL_REGDUMP_LEN;
	}
}

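/*
 * ethtool sizes the buffer it passes to lio_get_regs() ("ethtool -d ethX")
 * from this length, so each case must cover what the corresponding
 * *_read_csr_reg() dump routine below can emit.
 */
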
static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
{
	u32 reg;
	u8 pf_num = oct->pf_num;
	int len = 0;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

	/*0x29030 or 0x29040*/
	reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
	len += sprintf(s + len,
		       "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x27080 or 0x27090*/
	reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x27000 or 0x27010*/
	reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x29120*/
	reg = 0x29120;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x27300*/
	reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
		       oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x27200*/
	reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	reg = CN23XX_SLI_PKT_CNT_INT;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	reg = CN23XX_SLI_PKT_TIME_INT;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x29160*/
	reg = 0x29160;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	reg = CN23XX_SLI_OQ_WMARK;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
		       reg, (u64)octeon_read_csr64(oct, reg));

	reg = CN23XX_SLI_PKT_IOQ_RING_RST;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	reg = CN23XX_SLI_GBL_CONTROL;
	len += sprintf(s + len,
		       "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x29220*/
	reg = 0x29220;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
		       reg, (u64)octeon_read_csr64(oct, reg));

	/* PF only */
	if (pf_num == 0) {
		reg = CN23XX_SLI_OUT_BP_EN_W1S;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
			       reg, (u64)octeon_read_csr64(oct, reg));
	} else if (pf_num == 1) {
		reg = CN23XX_SLI_OUT_BP_EN2_W1S;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
			       reg, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Braces added here: without them only the final queue's count was
	 * printed, with i already out of range.
	 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	return len;
}

static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
{
	int len = 0;
	u32 reg;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	return len;
}

static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
{
	u32 reg;
	int i, len = 0;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
	reg = CN6XXX_WIN_WR_ADDR_LO;
	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_LO;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_LO;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
		       CN6XXX_WIN_WR_MASK_REG,
		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));

	/* PCI Interrupt Register */
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT0,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT0));
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT1,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));

	/* PCI Output queue registers */
	for (i = 0; i < oct->num_oqs; i++) {
		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}
	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
		       reg, octeon_read_csr(oct, reg));
	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
		       reg, octeon_read_csr(oct, reg));

	/* PCI Input queue registers */
	for (i = 0; i <= 3; i++) {
		reg = CN6XXX_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}

	/* PCI DMA registers */

	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
		       CN6XXX_DMA_CNT(0),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(0),
		       octeon_read_csr(oct, reg));

	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
		       CN6XXX_DMA_CNT(1),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));
	/* Fixed: this previously re-read CN6XXX_DMA_PKT_INT_LEVEL(1) while
	 * labelling the value DMA_TIME_1.
	 */
	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));

	/* PCI Index registers */

	len += sprintf(s + len, "\n");

	for (i = 0; i < 16; i++) {
		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
	}

	return len;
}

static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
{
	u32 val;
	int i, len = 0;

	/* PCI CONFIG Registers */

	len += sprintf(s + len,
		       "\n\t Octeon Config space Registers\n\n");

	for (i = 0; i <= 13; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	for (i = 30; i <= 34; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	return len;
}

/* Return register dump to user app. */
static void lio_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *regbuf)
{
	struct lio *lio = GET_LIO(dev);
	int len = 0;
	struct octeon_device *oct = lio->oct_dev;

	regs->version = OCT_ETHTOOL_REGSVER;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
		len += cn23xx_read_csr_reg(regbuf + len, oct);
		break;
	case OCTEON_CN23XX_VF_VID:
		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
		len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
		len += cn6xxx_read_csr_reg(regbuf + len, oct);
		len += cn6xxx_read_config_reg(regbuf + len, oct);
		break;
	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
			__func__, oct->chip_id);
	}
}

static u32 lio_get_priv_flags(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->oct_dev->priv_flags;
}

static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct lio *lio = GET_LIO(netdev);
	bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));

	lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
			  intr_by_tx_bytes);
	return 0;
}

static const struct ethtool_ops lio_ethtool_ops = {
	.get_link_ksettings	= lio_get_link_ksettings,
	.set_link_ksettings	= lio_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= lio_get_drvinfo,
	.get_ringparam		= lio_ethtool_get_ringparam,
	.set_ringparam		= lio_ethtool_set_ringparam,
	.get_channels		= lio_ethtool_get_channels,
	.set_channels		= lio_ethtool_set_channels,
	.set_phys_id		= lio_set_phys_id,
	.get_eeprom_len		= lio_get_eeprom_len,
	.get_eeprom		= lio_get_eeprom,
	.get_strings		= lio_get_strings,
	.get_ethtool_stats	= lio_get_ethtool_stats,
	.get_pauseparam		= lio_get_pauseparam,
	.set_pauseparam		= lio_set_pauseparam,
	.get_regs_len		= lio_get_regs_len,
	.get_regs		= lio_get_regs,
	.get_msglevel		= lio_get_msglevel,
	.set_msglevel		= lio_set_msglevel,
	.get_sset_count		= lio_get_sset_count,
	.get_coalesce		= lio_get_intr_coalesce,
	.set_coalesce		= lio_set_intr_coalesce,
	.get_priv_flags		= lio_get_priv_flags,
	.set_priv_flags		= lio_set_priv_flags,
	.get_ts_info		= lio_get_ts_info,
};

static const struct ethtool_ops lio_vf_ethtool_ops = {
	.get_link_ksettings	= lio_get_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= lio_get_vf_drvinfo,
	.get_ringparam		= lio_ethtool_get_ringparam,
	.set_ringparam		= lio_ethtool_set_ringparam,
	.get_channels		= lio_ethtool_get_channels,
	.set_channels		= lio_ethtool_set_channels,
	.get_strings		= lio_vf_get_strings,
	.get_ethtool_stats	= lio_vf_get_ethtool_stats,
	.get_regs_len		= lio_get_regs_len,
	.get_regs		= lio_get_regs,
	.get_msglevel		= lio_get_msglevel,
	.set_msglevel		= lio_vf_set_msglevel,
	.get_sset_count		= lio_vf_get_sset_count,
	.get_coalesce		= lio_get_intr_coalesce,
	.set_coalesce		= lio_set_intr_coalesce,
	.get_priv_flags		= lio_get_priv_flags,
	.set_priv_flags		= lio_set_priv_flags,
	.get_ts_info		= lio_get_ts_info,
};

void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	if (OCTEON_CN23XX_VF(oct))
		netdev->ethtool_ops = &lio_vf_ethtool_ops;
	else
		netdev->ethtool_ops = &lio_ethtool_ops;
}
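
/*
 * Note: the PF and VF probe paths are expected to call this once per
 * netdev, before register_netdev(), so the correct ops table is in place
 * when ethtool first queries the interface.
 */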