// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver ethtool intf
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "davinci_cpdma.h"
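/* Snapshot of the CPSW hardware statistics block. The field order is
 * assumed to mirror the register layout, since cpsw_get_ethtool_stats()
 * reads each counter with readl() at offsetof(struct cpsw_hw_stats, m).
 */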
struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];	/* reserved hole in the hw stats block */
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};

struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};
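/* Each table entry below expands to { name, type, sizeof, offset }: the
 * macros pack the stat group, the field size and the field offset into a
 * cpsw_stats descriptor, so the value can be fetched generically later.
 */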
#define CPSW_STAT(m)		CPSW_STATS, \
				sizeof_field(struct cpsw_hw_stats, m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS, \
				sizeof_field(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS, \
				sizeof_field(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};
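/* Per-channel CPDMA counters, taken from struct cpdma_chan_stats. The
 * same descriptor table serves both directions; cpsw_add_ch_strings()
 * prefixes each name with "Rx"/"Tx" and the channel number.
 */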
static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};
#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)
u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return priv->msg_enable;
}
void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}
int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	coal->rx_coalesce_usecs = cpsw->coal_intvl;

	return 0;
}
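/* Program the interrupt pacer, reached via "ethtool -C ethX rx-usecs N".
 * Reading the code: prescale = bus_freq_mhz * 4 input clocks per pacer
 * pulse, and rx/tx_imax appear to hold a target interrupt count per
 * millisecond, hence num_interrupts = (1000 * addnl_dvdr) / coal_intvl.
 */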
int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl = readl(&cpsw->wr_regs->int_control);
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
					      * addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}
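/* ethtool stat count: the common hw stats plus one full set of channel
 * stats per Rx and per Tx channel.
 */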
int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	switch (sset) {
	case ETH_SS_STATS:
		return (CPSW_STATS_COMMON_LEN +
		       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
		       CPSW_STATS_CH_LEN);
	default:
		return -EOPNOTSUPP;
	}
}
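/* Emit per-channel stat names, e.g. "Rx DMA chan 0: head_enqueue". */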
static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
{
	int ch_stats_len;
	int line;
	int i;

	ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
	for (i = 0; i < ch_stats_len; i++) {
		line = i % CPSW_STATS_CH_LEN;
		snprintf(*p, ETH_GSTRING_LEN,
			 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
			 (long)(i / CPSW_STATS_CH_LEN),
			 cpsw_gstrings_ch_stats[line].stat_string);
		*p += ETH_GSTRING_LEN;
	}
}
void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
			memcpy(p, cpsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
		break;
	}
}
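/* Fill data[] in exactly the order the strings were emitted above:
 * common hw stats first, then per-channel Rx stats, then Tx.
 */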
void cpsw_get_ethtool_stats(struct net_device *ndev,
			    struct ethtool_stats *stats, u64 *data)
{
	u8 *p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpdma_chan_stats ch_stats;
	int l, ch, i;

	/* Collect Davinci CPDMA stats for Rx and Tx Channel */
	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
		data[l] = readl(cpsw->hw_stats +
				cpsw_gstrings_stats[l].stat_offset);

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}

	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}
}
void cpsw_get_pauseparam(struct net_device *ndev,
			 struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	pause->autoneg = AUTONEG_DISABLE;
	pause->rx_pause = priv->rx_pause ? true : false;
	pause->tx_pause = priv->tx_pause ? true : false;
}
void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	wol->supported = 0;
	wol->wolopts = 0;

	if (cpsw->slaves[slave_no].phy)
		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}
int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
	else
		return -EOPNOTSUPP;
}
int cpsw_get_regs_len(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
}
void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
{
	u32 *reg = p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* update CPSW IP version */
	regs->version = cpsw->version;

	cpsw_ale_dump(cpsw->ale, reg);
}
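/* Runtime-PM bracket around every ethtool operation; paired with
 * cpsw_ethtool_op_complete() below.
 */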
int cpsw_ethtool_op_begin(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
		pm_runtime_put_noidle(cpsw->dev);
	}

	return ret;
}
void cpsw_ethtool_op_complete(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_put(priv->cpsw->dev);
	if (ret < 0)
		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}
void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_combined = 0;
	ch->max_other = 0;
	ch->other_count = 0;
	ch->rx_count = cpsw->rx_ch_num;
	ch->tx_count = cpsw->tx_ch_num;
	ch->combined_count = 0;
}
int cpsw_get_link_ksettings(struct net_device *ndev,
			    struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);

	return 0;
}
int cpsw_set_link_ksettings(struct net_device *ndev,
			    const struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
}
int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}
int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}
int cpsw_nway_reset(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
	else
		return -EOPNOTSUPP;
}
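/* The two helpers below quiesce and restart the whole data path; they
 * bracket channel and ring reconfiguration, which cannot be done while
 * DMA is live.
 */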
static void cpsw_suspend_data_pass(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int i;

	/* Disable NAPI scheduling */
	cpsw_intr_disable(cpsw);

	/* Stop all transmit queues for every network device */
	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (!(ndev && netif_running(ndev)))
			continue;

		netif_tx_stop_all_queues(ndev);

		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();
	}

	/* Handle rest of tx packets and stop cpdma channels */
	cpdma_ctlr_stop(cpsw->dma);
}
static int cpsw_resume_data_pass(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int i, ret;

	/* After this receive is started */
	if (cpsw->usage_count) {
		ret = cpsw_fill_rx_channels(priv);
		if (ret)
			return ret;

		cpdma_ctlr_start(cpsw->dma);
		cpsw_intr_enable(cpsw);
	}

	/* Resume transmit for every affected interface */
	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (ndev && netif_running(ndev))
			netif_tx_start_all_queues(ndev);
	}

	return 0;
}
static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
				  struct ethtool_channels *ch)
{
	if (cpsw->quirk_irq) {
		dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
		return -EOPNOTSUPP;
	}

	if (ch->combined_count)
		return -EINVAL;

	/* verify we have at least one channel in each direction */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	if (ch->rx_count > cpsw->data.channels ||
	    ch->tx_count > cpsw->data.channels)
		return -EINVAL;

	return 0;
}
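/* Grow or shrink the Rx or Tx channel set to ch_num. Note that Tx
 * channels are created from the highest cpdma channel downwards
 * (vch = 7 - *ch), presumably to map them onto the hw channels by
 * priority; the Rx side counts up from channel 0.
 */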
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
				    cpdma_handler_fn rx_handler)
{
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch, vch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = rx_handler;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
	}

	while (*ch < ch_num) {
		vch = rx ? *ch : 7 - *ch;
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);

		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	while (*ch > ch_num) {
		(*ch)--;

		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}
static void cpsw_fail(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i;

	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (ndev)
			dev_close(ndev);
	}
}
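/* "ethtool -L ethX rx N tx M" entry point: validate the request, quiesce
 * the data path, resize both channel sets, tell the stack the new queue
 * counts, re-split the budget (cpsw_split_res) and rebuild the XDP rxqs
 * if the Rx pool count changed. Any failure closes the devices via
 * cpsw_fail().
 */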
int cpsw_set_channels_common(struct net_device *ndev,
			     struct ethtool_channels *chs,
			     cpdma_handler_fn rx_handler)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *sl_ndev;
	int i, new_pools, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);

	new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;

	ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
	if (ret)
		goto err;

	ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler);
	if (ret)
		goto err;

	for (i = 0; i < cpsw->data.slaves; i++) {
		sl_ndev = cpsw->slaves[i].ndev;
		if (!(sl_ndev && netif_running(sl_ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	cpsw_split_res(cpsw);

	if (new_pools) {
		cpsw_destroy_xdp_rxqs(cpsw);
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret)
			goto err;
	}

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	cpsw_fail(cpsw);
	return ret;
}
void cpsw_get_ringparam(struct net_device *ndev,
			struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
	ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
}
int cpsw_set_ringparam(struct net_device *ndev,
		       struct ethtool_ringparam *ering)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int descs_num, ret;

	/* ignore ering->tx_pending - only rx_pending adjustment is supported */

	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
	    ering->rx_pending < CPSW_MAX_QUEUES ||
	    ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
		return -EINVAL;

	descs_num = cpdma_get_num_rx_descs(cpsw->dma);
	if (ering->rx_pending == descs_num)
		return 0;

	cpsw_suspend_data_pass(ndev);

	ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
	if (ret) {
		if (cpsw_resume_data_pass(ndev))
			goto err;

		return ret;
	}

	if (cpsw->usage_count) {
		cpsw_destroy_xdp_rxqs(cpsw);
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret)
			goto err;
	}

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	cpdma_set_num_rx_descs(cpsw->dma, descs_num);
	dev_err(cpsw->dev, "cannot set ring params, closing device\n");
	cpsw_fail(cpsw);
	return ret;
}
#if IS_ENABLED(CONFIG_TI_CPTS)
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = cpsw->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);

	return 0;
}
#else
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;

	return 0;
}
#endif