// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver ethtool intf
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "davinci_cpdma.h"
struct cpsw_hw_stats {
        u32     rxgoodframes;
        u32     rxbroadcastframes;
        u32     rxmulticastframes;
        u32     rxpauseframes;
        u32     rxcrcerrors;
        u32     rxaligncodeerrors;
        u32     rxoversizedframes;
        u32     rxjabberframes;
        u32     rxundersizedframes;
        u32     rxfragments;
        u32     __pad_0[2];
        u32     rxoctets;
        u32     txgoodframes;
        u32     txbroadcastframes;
        u32     txmulticastframes;
        u32     txpauseframes;
        u32     txdeferredframes;
        u32     txcollisionframes;
        u32     txsinglecollframes;
        u32     txmultcollframes;
        u32     txexcessivecollisions;
        u32     txlatecollisions;
        u32     txunderrun;
        u32     txcarriersenseerrors;
        u32     txoctets;
        u32     octetframes64;
        u32     octetframes65t127;
        u32     octetframes128t255;
        u32     octetframes256t511;
        u32     octetframes512t1023;
        u32     octetframes1024tup;
        u32     netoctets;
        u32     rxsofoverruns;
        u32     rxmofoverruns;
        u32     rxdmaoverruns;
};

struct cpsw_stats {
        char stat_string[ETH_GSTRING_LEN];
        int type;
        int sizeof_stat;
        int stat_offset;
};

enum {
        CPSW_STATS,
        CPDMA_RX_STATS,
        CPDMA_TX_STATS,
};
#define CPSW_STAT(m)            CPSW_STATS, \
                                sizeof_field(struct cpsw_hw_stats, m), \
                                offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)        CPDMA_RX_STATS, \
                                sizeof_field(struct cpdma_chan_stats, m), \
                                offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)        CPDMA_TX_STATS, \
                                sizeof_field(struct cpdma_chan_stats, m), \
                                offsetof(struct cpdma_chan_stats, m)
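
/*
 * Each table entry below pairs a display string with the size and offset
 * of the corresponding counter, so cpsw_get_strings() and
 * cpsw_get_ethtool_stats() can walk the tables generically instead of
 * naming every counter twice.
 */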
static const struct cpsw_stats cpsw_gstrings_stats[] = {
        { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
        { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
        { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
        { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
        { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
        { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
        { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
        { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
        { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
        { "Rx Fragments", CPSW_STAT(rxfragments) },
        { "Rx Octets", CPSW_STAT(rxoctets) },
        { "Good Tx Frames", CPSW_STAT(txgoodframes) },
        { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
        { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
        { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
        { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
        { "Collisions", CPSW_STAT(txcollisionframes) },
        { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
        { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
        { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
        { "Late Collisions", CPSW_STAT(txlatecollisions) },
        { "Tx Underrun", CPSW_STAT(txunderrun) },
        { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
        { "Tx Octets", CPSW_STAT(txoctets) },
        { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
        { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
        { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
        { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
        { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
        { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
        { "Net Octets", CPSW_STAT(netoctets) },
        { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
        { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
        { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};
static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
        { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
        { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
        { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
        { "misqueued", CPDMA_RX_STAT(misqueued) },
        { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
        { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
        { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
        { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
        { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
        { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
        { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
        { "requeue", CPDMA_RX_STAT(requeue) },
        { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};
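
/*
 * Note: CPDMA_RX_STAT() serves the tx direction too - rx and tx channels
 * share struct cpdma_chan_stats, so the offsets are identical and only
 * the "Rx"/"Tx" string prefix differs (see cpsw_add_ch_strings()).
 */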
#define CPSW_STATS_COMMON_LEN   ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN       ARRAY_SIZE(cpsw_gstrings_ch_stats)
u32 cpsw_get_msglevel(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);

        return priv->msg_enable;
}
void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
        struct cpsw_priv *priv = netdev_priv(ndev);

        priv->msg_enable = value;
}
int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
                      struct kernel_ethtool_coalesce *kernel_coal,
                      struct netlink_ext_ack *extack)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        coal->rx_coalesce_usecs = cpsw->coal_intvl;
        return 0;
}
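
/*
 * The pacer counts in 4us pulses: prescale = bus_freq_mhz * 4 clock
 * ticks makes one pulse, and rx_imax/tx_imax cap the number of
 * interrupts per millisecond (1000 / coal_intvl). Intervals above
 * CPSW_CMINTMAX_INTVL are reached by dilating the pulse with an
 * additional divider (addnl_dvdr).
 */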
int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
                      struct kernel_ethtool_coalesce *kernel_coal,
                      struct netlink_ext_ack *extack)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        u32 int_ctrl;
        u32 num_interrupts = 0;
        u32 prescale = 0;
        u32 addnl_dvdr = 1;
        u32 coal_intvl = 0;
        struct cpsw_common *cpsw = priv->cpsw;

        coal_intvl = coal->rx_coalesce_usecs;

        int_ctrl = readl(&cpsw->wr_regs->int_control);
        prescale = cpsw->bus_freq_mhz * 4;

        if (!coal->rx_coalesce_usecs) {
                int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
                goto update_return;
        }

        if (coal_intvl < CPSW_CMINTMIN_INTVL)
                coal_intvl = CPSW_CMINTMIN_INTVL;

        if (coal_intvl > CPSW_CMINTMAX_INTVL) {
                /* Interrupt pacer works with 4us Pulse, we can
                 * throttle further by dilating the 4us pulse.
                 */
                addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

                if (addnl_dvdr > 1) {
                        prescale *= addnl_dvdr;
                        if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
                                coal_intvl = (CPSW_CMINTMAX_INTVL *
                                              addnl_dvdr);
                } else {
                        addnl_dvdr = 1;
                        coal_intvl = CPSW_CMINTMAX_INTVL;
                }
        }

        num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
        writel(num_interrupts, &cpsw->wr_regs->rx_imax);
        writel(num_interrupts, &cpsw->wr_regs->tx_imax);

        int_ctrl |= CPSW_INTPACEEN;
        int_ctrl &= (~CPSW_INTPRESCALE_MASK);
        int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
        writel(int_ctrl, &cpsw->wr_regs->int_control);

        cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
        cpsw->coal_intvl = coal_intvl;

        return 0;
}
int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        switch (sset) {
        case ETH_SS_STATS:
                return (CPSW_STATS_COMMON_LEN +
                        (cpsw->rx_ch_num + cpsw->tx_ch_num) *
                        CPSW_STATS_CH_LEN);
        default:
                return -EOPNOTSUPP;
        }
}
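
/*
 * The stats buffer layout is: all common hw counters first, then
 * CPSW_STATS_CH_LEN entries per rx channel, then per tx channel.
 * cpsw_get_strings() and cpsw_get_ethtool_stats() must emit entries in
 * this exact order.
 */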
static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
{
        int ch_stats_len;
        int line;
        int i;

        ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
        for (i = 0; i < ch_stats_len; i++) {
                line = i % CPSW_STATS_CH_LEN;
                snprintf(*p, ETH_GSTRING_LEN,
                         "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
                         (long)(i / CPSW_STATS_CH_LEN),
                         cpsw_gstrings_ch_stats[line].stat_string);
                *p += ETH_GSTRING_LEN;
        }
}
void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        u8 *p = data;
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
                        memcpy(p, cpsw_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }

                cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
                cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
                break;
        }
}
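
/*
 * Values below are written in the same order as the strings above; that
 * ordering is what pairs name and value in `ethtool -S` output.
 */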
void cpsw_get_ethtool_stats(struct net_device *ndev,
                            struct ethtool_stats *stats, u64 *data)
{
        u8 *p;
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        struct cpdma_chan_stats ch_stats;
        int i, l, ch;

        /* CPSW hw stats first, then Davinci CPDMA stats per Rx/Tx channel */
        for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
                data[l] = readl(cpsw->hw_stats +
                                cpsw_gstrings_stats[l].stat_offset);

        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
                cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
                for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
                        p = (u8 *)&ch_stats +
                                cpsw_gstrings_ch_stats[i].stat_offset;
                        data[l] = *(u32 *)p;
                }
        }

        for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
                cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
                for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
                        p = (u8 *)&ch_stats +
                                cpsw_gstrings_ch_stats[i].stat_offset;
                        data[l] = *(u32 *)p;
                }
        }
}
void cpsw_get_pauseparam(struct net_device *ndev,
                         struct ethtool_pauseparam *pause)
{
        struct cpsw_priv *priv = netdev_priv(ndev);

        pause->autoneg = AUTONEG_DISABLE;
        pause->rx_pause = priv->rx_pause ? true : false;
        pause->tx_pause = priv->tx_pause ? true : false;
}
void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        wol->supported = 0;
        wol->wolopts = 0;

        if (cpsw->slaves[slave_no].phy)
                phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}
int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (cpsw->slaves[slave_no].phy)
                return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
        else
                return -EOPNOTSUPP;
}
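
/*
 * The "registers" exposed via `ethtool -d` are not the switch MMRs but a
 * dump of the ALE (address lookup engine) table, ALE_ENTRY_WORDS words
 * per entry.
 */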
int cpsw_get_regs_len(struct net_device *ndev)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        return cpsw_ale_get_num_entries(cpsw->ale) *
               ALE_ENTRY_WORDS * sizeof(u32);
}
void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
{
        u32 *reg = p;
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        /* update CPSW IP version */
        regs->version = cpsw->version;

        cpsw_ale_dump(cpsw->ale, reg);
}
int cpsw_ethtool_op_begin(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int ret;

        ret = pm_runtime_resume_and_get(cpsw->dev);
        if (ret < 0)
                cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);

        return ret;
}
void cpsw_ethtool_op_complete(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        int ret;

        ret = pm_runtime_put(priv->cpsw->dev);
        if (ret < 0)
                cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}
void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
        ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
        ch->max_combined = 0;
        ch->max_other = 0;
        ch->other_count = 0;
        ch->rx_count = cpsw->rx_ch_num;
        ch->tx_count = cpsw->tx_ch_num;
        ch->combined_count = 0;
}
int cpsw_get_link_ksettings(struct net_device *ndev,
                            struct ethtool_link_ksettings *ecmd)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (!cpsw->slaves[slave_no].phy)
                return -EOPNOTSUPP;

        phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
        return 0;
}
int cpsw_set_link_ksettings(struct net_device *ndev,
                            const struct ethtool_link_ksettings *ecmd)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (!cpsw->slaves[slave_no].phy)
                return -EOPNOTSUPP;

        return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
}
int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (cpsw->slaves[slave_no].phy)
                return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
        else
                return -EOPNOTSUPP;
}
int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (cpsw->slaves[slave_no].phy)
                return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
        else
                return -EOPNOTSUPP;
}
int cpsw_nway_reset(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (cpsw->slaves[slave_no].phy)
                return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
        else
                return -EOPNOTSUPP;
}
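
/*
 * Channel and ring reconfiguration below follow the same pattern:
 * quiesce the data path (disable interrupts/NAPI, stop tx queues, stop
 * CPDMA), apply the change, then resume. If resume fails, the slave
 * netdevs are closed via cpsw_fail().
 */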
static void cpsw_suspend_data_pass(struct net_device *ndev)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        int i;

        /* Disable NAPI scheduling */
        cpsw_intr_disable(cpsw);

        /* Stop all transmit queues for every network device */
        for (i = 0; i < cpsw->data.slaves; i++) {
                ndev = cpsw->slaves[i].ndev;
                if (!(ndev && netif_running(ndev)))
                        continue;

                netif_tx_stop_all_queues(ndev);

                /* Barrier, so that stop_queue visible to other cpus */
                smp_mb__after_atomic();
        }

        /* Handle rest of tx packets and stop cpdma channels */
        cpdma_ctlr_stop(cpsw->dma);
}
static int cpsw_resume_data_pass(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int i, ret;

        /* After this receive is started */
        if (cpsw->usage_count) {
                ret = cpsw_fill_rx_channels(priv);
                if (ret)
                        return ret;

                cpdma_ctlr_start(cpsw->dma);
                cpsw_intr_enable(cpsw);
        }

        /* Resume transmit for every affected interface */
        for (i = 0; i < cpsw->data.slaves; i++) {
                ndev = cpsw->slaves[i].ndev;
                if (ndev && netif_running(ndev))
                        netif_tx_start_all_queues(ndev);
        }

        return 0;
}
static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
                                  struct ethtool_channels *ch)
{
        if (cpsw->quirk_irq) {
                dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
                return -EOPNOTSUPP;
        }

        if (ch->combined_count)
                return -EINVAL;

        /* verify we have at least one channel in each direction */
        if (!ch->rx_count || !ch->tx_count)
                return -EINVAL;

        if (ch->rx_count > cpsw->data.channels ||
            ch->tx_count > cpsw->data.channels)
                return -EINVAL;

        return 0;
}
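
/*
 * Grow or shrink one direction to ch_num channels, one channel at a
 * time. Note the hw channel mapping: rx uses cpdma channels counting up
 * from 0, tx counts down from 7, so the two directions allocate from
 * opposite ends of the cpdma channel range.
 */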
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
                                    cpdma_handler_fn rx_handler)
{
        struct cpsw_common *cpsw = priv->cpsw;
        void (*handler)(void *, int, int);
        struct netdev_queue *queue;
        struct cpsw_vector *vec;
        int ret, *ch, vch;

        if (rx) {
                ch = &cpsw->rx_ch_num;
                vec = cpsw->rxv;
                handler = rx_handler;
        } else {
                ch = &cpsw->tx_ch_num;
                vec = cpsw->txv;
                handler = cpsw_tx_handler;
        }

        while (*ch < ch_num) {
                vch = rx ? *ch : 7 - *ch;
                vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
                queue = netdev_get_tx_queue(priv->ndev, *ch);
                queue->tx_maxrate = 0;

                if (IS_ERR(vec[*ch].ch))
                        return PTR_ERR(vec[*ch].ch);

                if (!vec[*ch].ch)
                        return -EINVAL;

                cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
                          (rx ? "rx" : "tx"));
                (*ch)++;
        }

        while (*ch > ch_num) {
                (*ch)--;

                ret = cpdma_chan_destroy(vec[*ch].ch);
                if (ret)
                        return ret;

                cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
                          (rx ? "rx" : "tx"));
        }

        return 0;
}
static void cpsw_fail(struct cpsw_common *cpsw)
{
        struct net_device *ndev;
        int i;

        for (i = 0; i < cpsw->data.slaves; i++) {
                ndev = cpsw->slaves[i].ndev;
                if (ndev)
                        dev_close(ndev);
        }
}
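
/*
 * Common ->set_channels() handler: validate, quiesce, resize the rx then
 * tx channel arrays, propagate the new queue counts to the stack,
 * rebalance the budget, recreate the XDP rxqs if the rx pool changed,
 * then resume. Any failure closes the ports.
 */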
int cpsw_set_channels_common(struct net_device *ndev,
                             struct ethtool_channels *chs,
                             cpdma_handler_fn rx_handler)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        struct net_device *sl_ndev;
        int i, new_pools, ret;

        ret = cpsw_check_ch_settings(cpsw, chs);
        if (ret < 0)
                return ret;

        cpsw_suspend_data_pass(ndev);

        new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;

        ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
        if (ret)
                goto err;

        ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler);
        if (ret)
                goto err;

        for (i = 0; i < cpsw->data.slaves; i++) {
                sl_ndev = cpsw->slaves[i].ndev;
                if (!(sl_ndev && netif_running(sl_ndev)))
                        continue;

                /* Inform stack about new count of queues */
                ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num);
                if (ret) {
                        dev_err(priv->dev, "cannot set real number of tx queues\n");
                        goto err;
                }

                ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num);
                if (ret) {
                        dev_err(priv->dev, "cannot set real number of rx queues\n");
                        goto err;
                }
        }

        cpsw_split_res(cpsw);

        if (new_pools) {
                cpsw_destroy_xdp_rxqs(cpsw);
                ret = cpsw_create_xdp_rxqs(cpsw);
                if (ret)
                        goto err;
        }

        ret = cpsw_resume_data_pass(ndev);
        if (!ret)
                return 0;
err:
        dev_err(priv->dev, "cannot update channels number, closing device\n");
        cpsw_fail(cpsw);
        return ret;
}
void cpsw_get_ringparam(struct net_device *ndev,
                        struct ethtool_ringparam *ering,
                        struct kernel_ethtool_ringparam *kernel_ering,
                        struct netlink_ext_ack *extack)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;

        /* tx ring resizing is not supported, values are only reported */
        ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
        ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
        ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
        ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
}
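
/*
 * Only the rx descriptor count is directly adjustable; tx descriptors
 * come out of the remainder of the shared CPDMA pool, which is why
 * CPSW_MAX_QUEUES descriptors are always held back from the rx maximum.
 */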
int cpsw_set_ringparam(struct net_device *ndev,
                       struct ethtool_ringparam *ering,
                       struct kernel_ethtool_ringparam *kernel_ering,
                       struct netlink_ext_ack *extack)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        int descs_num, ret;

        /* ignore ering->tx_pending - only rx_pending adjustment is supported */

        if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
            ering->rx_pending < CPSW_MAX_QUEUES ||
            ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
                return -EINVAL;

        descs_num = cpdma_get_num_rx_descs(cpsw->dma);
        if (ering->rx_pending == descs_num)
                return 0;

        cpsw_suspend_data_pass(ndev);

        ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
        if (ret) {
                if (cpsw_resume_data_pass(ndev))
                        goto err;

                return ret;
        }

        if (cpsw->usage_count) {
                cpsw_destroy_xdp_rxqs(cpsw);
                ret = cpsw_create_xdp_rxqs(cpsw);
                if (ret)
                        goto err;
        }

        ret = cpsw_resume_data_pass(ndev);
        if (!ret)
                return 0;
err:
        cpdma_set_num_rx_descs(cpsw->dma, descs_num);
        dev_err(cpsw->dev, "cannot set ring params, closing device\n");
        cpsw_fail(cpsw);
        return ret;
}
#if IS_ENABLED(CONFIG_TI_CPTS)
int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        info->so_timestamping =
                SOF_TIMESTAMPING_TX_HARDWARE |
                SOF_TIMESTAMPING_TX_SOFTWARE |
                SOF_TIMESTAMPING_RX_HARDWARE |
                SOF_TIMESTAMPING_RAW_HARDWARE;
        info->phc_index = cpsw->cpts->phc_index;
        info->tx_types =
                (1 << HWTSTAMP_TX_OFF) |
                (1 << HWTSTAMP_TX_ON);
        info->rx_filters =
                (1 << HWTSTAMP_FILTER_NONE) |
                (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
        return 0;
}
#else
int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
        info->so_timestamping =
                SOF_TIMESTAMPING_TX_SOFTWARE;
        info->tx_types = 0;
        info->rx_filters = 0;
        return 0;
}
#endif