// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver ethtool intf
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "davinci_cpdma.h"

/* Hardware statistics register block; fields are read via readl() at
 * offsetof() offsets from cpsw->hw_stats, so order and padding must match
 * the register map.
 */
struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];	/* reserved register gap */
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};

struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};

#define CPSW_STAT(m)		CPSW_STATS, \
				sizeof_field(struct cpsw_hw_stats, m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS, \
				sizeof_field(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS, \
				sizeof_field(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)

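/* Each table entry below expands, via the macros above, to
 * { name, type, sizeof_field(...), offsetof(...) }, i.e. it fills all
 * four members of struct cpsw_stats for one exported statistic.
 */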
static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};

static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)

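/* The total number of stats reported per ndev is CPSW_STATS_COMMON_LEN +
 * (rx_ch_num + tx_ch_num) * CPSW_STATS_CH_LEN; cpsw_get_sset_count() and
 * cpsw_get_ethtool_stats() below must agree on this count, which is what
 * "ethtool -S <iface>" ends up listing.
 */
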
u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	coal->rx_coalesce_usecs = cpsw->coal_intvl;
	return 0;
}

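/* Program the interrupt pacer from the requested rx_coalesce_usecs
 * ("ethtool -C <iface> rx-usecs <n>"). rx_imax/tx_imax hold the maximum
 * number of interrupts per millisecond, i.e. 1000 / coal_intvl, scaled by
 * addnl_dvdr when the 4us pacer pulse has to be dilated. As a worked
 * example (assuming a 250 MHz bus clock): prescale = 250 * 4 = 1000 bus
 * clocks per 4us pulse, and coal_intvl = 250 usecs programs an imax of
 * 1000 / 250 = 4 interrupts per millisecond.
 */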
int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl = readl(&cpsw->wr_regs->int_control);
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
					      * addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}

int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	switch (sset) {
	case ETH_SS_STATS:
		return (CPSW_STATS_COMMON_LEN +
		       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
		       CPSW_STATS_CH_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

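/* Emit one ethtool string per channel stat, e.g. "Rx DMA chan 0:
 * head_enqueue", advancing *p by ETH_GSTRING_LEN per entry.
 */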
static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
{
	int i, ch_stats_len;
	int line;

	ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
	for (i = 0; i < ch_stats_len; i++) {
		line = i % CPSW_STATS_CH_LEN;
		snprintf(*p, ETH_GSTRING_LEN,
			 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
			 (long)(i / CPSW_STATS_CH_LEN),
			 cpsw_gstrings_ch_stats[line].stat_string);
		*p += ETH_GSTRING_LEN;
	}
}

void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
			memcpy(p, cpsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
		break;
	}
}

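/* The u64 data[] layout filled below must match the string order above:
 * CPSW_STATS_COMMON_LEN hw stats first, then CPSW_STATS_CH_LEN entries
 * for each rx channel, then for each tx channel.
 */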
void cpsw_get_ethtool_stats(struct net_device *ndev,
			    struct ethtool_stats *stats, u64 *data)
{
	u8 *p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpdma_chan_stats ch_stats;
	int i, l, ch;

	/* Collect Davinci CPDMA stats for Rx and Tx Channel */
	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
		data[l] = readl(cpsw->hw_stats +
				cpsw_gstrings_stats[l].stat_offset);

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}

	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}
}

void cpsw_get_pauseparam(struct net_device *ndev,
			 struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	pause->autoneg = AUTONEG_DISABLE;
	pause->rx_pause = priv->rx_pause ? true : false;
	pause->tx_pause = priv->tx_pause ? true : false;
}

void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	wol->supported = 0;
	wol->wolopts = 0;

	if (cpsw->slaves[slave_no].phy)
		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}

int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
	else
		return -EOPNOTSUPP;
}

int cpsw_get_regs_len(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	return cpsw_ale_get_num_entries(cpsw->ale) *
	       ALE_ENTRY_WORDS * sizeof(u32);
}

void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
{
	u32 *reg = p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* update CPSW IP version */
	regs->version = cpsw->version;

	cpsw_ale_dump(cpsw->ale, reg);
}

int cpsw_ethtool_op_begin(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
		pm_runtime_put_noidle(cpsw->dev);
	}

	return ret;
}

void cpsw_ethtool_op_complete(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_put(priv->cpsw->dev);
	if (ret < 0)
		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}

void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_combined = 0;
	ch->max_other = 0;
	ch->other_count = 0;
	ch->rx_count = cpsw->rx_ch_num;
	ch->tx_count = cpsw->tx_ch_num;
	ch->combined_count = 0;
}

int cpsw_get_link_ksettings(struct net_device *ndev,
			    struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
	return 0;
}

int cpsw_set_link_ksettings(struct net_device *ndev,
			    const struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
}

int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

int cpsw_nway_reset(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
	else
		return -EOPNOTSUPP;
}

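/* cpsw_suspend_data_pass()/cpsw_resume_data_pass() bracket every
 * reconfiguration below (channel count or ring size changes): quiesce
 * traffic and stop cpdma, apply the change, then refill rx channels and
 * restart the queues.
 */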
static void cpsw_suspend_data_pass(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int i;

	/* Disable NAPI scheduling */
	cpsw_intr_disable(cpsw);

	/* Stop all transmit queues for every network device.
	 */
	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (!(ndev && netif_running(ndev)))
			continue;

		netif_tx_stop_all_queues(ndev);

		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();
	}

	/* Handle rest of tx packets and stop cpdma channels */
	cpdma_ctlr_stop(cpsw->dma);
}

static int cpsw_resume_data_pass(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int i, ret;

	/* After this receive is started */
	if (cpsw->usage_count) {
		ret = cpsw_fill_rx_channels(priv);
		if (ret)
			return ret;

		cpdma_ctlr_start(cpsw->dma);
		cpsw_intr_enable(cpsw);
	}

	/* Resume transmit for every affected interface */
	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (ndev && netif_running(ndev))
			netif_tx_start_all_queues(ndev);
	}

	return 0;
}

static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
				  struct ethtool_channels *ch)
{
	if (cpsw->quirk_irq) {
		dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
		return -EOPNOTSUPP;
	}

	if (ch->combined_count)
		return -EINVAL;

	/* verify we have at least one channel in each direction */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	if (ch->rx_count > cpsw->data.channels ||
	    ch->tx_count > cpsw->data.channels)
		return -EINVAL;

	return 0;
}

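/* Grow or shrink the cpdma channel list to ch_num. Note the hardware
 * channel index: rx channels are created from 0 upwards while tx channels
 * use vch = 7 - *ch, so the two directions allocate from opposite ends of
 * the 0-7 cpdma channel range.
 */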
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
				    cpdma_handler_fn rx_handler)
{
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch, vch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = rx_handler;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
	}

	while (*ch < ch_num) {
		vch = rx ? *ch : 7 - *ch;
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);

		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	while (*ch > ch_num) {
		(*ch)--;

		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}

static void cpsw_fail(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i;

	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (ndev)
			dev_close(ndev);
	}
}

int cpsw_set_channels_common(struct net_device *ndev,
			     struct ethtool_channels *chs,
			     cpdma_handler_fn rx_handler)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *sl_ndev;
	int i, new_pools, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);

	new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;

	ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
	if (ret)
		goto err;

	ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler);
	if (ret)
		goto err;

	for (i = 0; i < cpsw->data.slaves; i++) {
		sl_ndev = cpsw->slaves[i].ndev;
		if (!(sl_ndev && netif_running(sl_ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	cpsw_split_res(cpsw);

	if (new_pools) {
		cpsw_destroy_xdp_rxqs(cpsw);
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret)
			goto err;
	}

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	cpsw_fail(cpsw);
	return ret;
}

void cpsw_get_ringparam(struct net_device *ndev,
			struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	/* not supported */
	ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
	ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
}

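/* Only rx_pending is honoured ("ethtool -G <iface> rx <n>"). On failure
 * the previous rx descriptor count is restored; if traffic cannot be
 * resumed, all slave interfaces are closed via cpsw_fail().
 */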
int cpsw_set_ringparam(struct net_device *ndev,
		       struct ethtool_ringparam *ering)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int descs_num, ret;

	/* ignore ering->tx_pending - only rx_pending adjustment is supported */

	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
	    ering->rx_pending < CPSW_MAX_QUEUES ||
	    ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
		return -EINVAL;

	descs_num = cpdma_get_num_rx_descs(cpsw->dma);
	if (ering->rx_pending == descs_num)
		return 0;

	cpsw_suspend_data_pass(ndev);

	ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
	if (ret) {
		if (cpsw_resume_data_pass(ndev))
			goto err;

		return ret;
	}

	if (cpsw->usage_count) {
		cpsw_destroy_xdp_rxqs(cpsw);
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret)
			goto err;
	}

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	cpdma_set_num_rx_descs(cpsw->dma, descs_num);
	dev_err(cpsw->dev, "cannot set ring params, closing device\n");
	cpsw_fail(cpsw);
	return ret;
}

#if IS_ENABLED(CONFIG_TI_CPTS)
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = cpsw->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
#else
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
	return 0;
}
#endif