1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
11 Documentation available at:
12 http://www.stlinux.com
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/prefetch.h>
32 #include <linux/pinctrl/consumer.h>
33 #ifdef CONFIG_DEBUG_FS
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #endif /* CONFIG_DEBUG_FS */
37 #include <linux/net_tstamp.h>
38 #include <linux/phylink.h>
39 #include <linux/udp.h>
40 #include <net/pkt_cls.h>
41 #include "stmmac_ptp.h"
43 #include <linux/reset.h>
44 #include <linux/of_mdio.h>
45 #include "dwmac1000.h"
49 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
50 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
52 /* Module parameters */
54 static int watchdog
= TX_TIMEO
;
55 module_param(watchdog
, int, 0644);
56 MODULE_PARM_DESC(watchdog
, "Transmit timeout in milliseconds (default 5s)");
58 static int debug
= -1;
59 module_param(debug
, int, 0644);
60 MODULE_PARM_DESC(debug
, "Message Level (-1: default, 0: no output, 16: all)");
62 static int phyaddr
= -1;
63 module_param(phyaddr
, int, 0444);
64 MODULE_PARM_DESC(phyaddr
, "Physical device address");
66 #define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
67 #define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
69 static int flow_ctrl
= FLOW_AUTO
;
70 module_param(flow_ctrl
, int, 0644);
71 MODULE_PARM_DESC(flow_ctrl
, "Flow control ability [on/off]");
73 static int pause
= PAUSE_TIME
;
74 module_param(pause
, int, 0644);
75 MODULE_PARM_DESC(pause
, "Flow Control Pause Time");
78 static int tc
= TC_DEFAULT
;
79 module_param(tc
, int, 0644);
80 MODULE_PARM_DESC(tc
, "DMA threshold control value");
82 #define DEFAULT_BUFSIZE 1536
83 static int buf_sz
= DEFAULT_BUFSIZE
;
84 module_param(buf_sz
, int, 0644);
85 MODULE_PARM_DESC(buf_sz
, "DMA buffer size");
87 #define STMMAC_RX_COPYBREAK 256
89 static const u32 default_msg_level
= (NETIF_MSG_DRV
| NETIF_MSG_PROBE
|
90 NETIF_MSG_LINK
| NETIF_MSG_IFUP
|
91 NETIF_MSG_IFDOWN
| NETIF_MSG_TIMER
);
93 #define STMMAC_DEFAULT_LPI_TIMER 1000
94 static int eee_timer
= STMMAC_DEFAULT_LPI_TIMER
;
95 module_param(eee_timer
, int, 0644);
96 MODULE_PARM_DESC(eee_timer
, "LPI tx expiration time in msec");
97 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
99 /* By default the driver will use the ring mode to manage tx and rx descriptors,
100 * but allow user to force to use the chain instead of the ring
102 static unsigned int chain_mode
;
103 module_param(chain_mode
, int, 0444);
104 MODULE_PARM_DESC(chain_mode
, "To use chain instead of ring mode");
106 static irqreturn_t
stmmac_interrupt(int irq
, void *dev_id
);
108 #ifdef CONFIG_DEBUG_FS
109 static const struct net_device_ops stmmac_netdev_ops
;
110 static void stmmac_init_fs(struct net_device
*dev
);
111 static void stmmac_exit_fs(struct net_device
*dev
);
114 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
117 * stmmac_verify_args - verify the driver parameters.
118 * Description: it checks the driver parameters and set a default in case of
121 static void stmmac_verify_args(void)
123 if (unlikely(watchdog
< 0))
125 if (unlikely((buf_sz
< DEFAULT_BUFSIZE
) || (buf_sz
> BUF_SIZE_16KiB
)))
126 buf_sz
= DEFAULT_BUFSIZE
;
127 if (unlikely(flow_ctrl
> 1))
128 flow_ctrl
= FLOW_AUTO
;
129 else if (likely(flow_ctrl
< 0))
130 flow_ctrl
= FLOW_OFF
;
131 if (unlikely((pause
< 0) || (pause
> 0xffff)))
134 eee_timer
= STMMAC_DEFAULT_LPI_TIMER
;
138 * stmmac_disable_all_queues - Disable all queues
139 * @priv: driver private structure
141 static void stmmac_disable_all_queues(struct stmmac_priv
*priv
)
143 u32 rx_queues_cnt
= priv
->plat
->rx_queues_to_use
;
144 u32 tx_queues_cnt
= priv
->plat
->tx_queues_to_use
;
145 u32 maxq
= max(rx_queues_cnt
, tx_queues_cnt
);
148 for (queue
= 0; queue
< maxq
; queue
++) {
149 struct stmmac_channel
*ch
= &priv
->channel
[queue
];
151 if (queue
< rx_queues_cnt
)
152 napi_disable(&ch
->rx_napi
);
153 if (queue
< tx_queues_cnt
)
154 napi_disable(&ch
->tx_napi
);
159 * stmmac_enable_all_queues - Enable all queues
160 * @priv: driver private structure
162 static void stmmac_enable_all_queues(struct stmmac_priv
*priv
)
164 u32 rx_queues_cnt
= priv
->plat
->rx_queues_to_use
;
165 u32 tx_queues_cnt
= priv
->plat
->tx_queues_to_use
;
166 u32 maxq
= max(rx_queues_cnt
, tx_queues_cnt
);
169 for (queue
= 0; queue
< maxq
; queue
++) {
170 struct stmmac_channel
*ch
= &priv
->channel
[queue
];
172 if (queue
< rx_queues_cnt
)
173 napi_enable(&ch
->rx_napi
);
174 if (queue
< tx_queues_cnt
)
175 napi_enable(&ch
->tx_napi
);
179 static void stmmac_service_event_schedule(struct stmmac_priv
*priv
)
181 if (!test_bit(STMMAC_DOWN
, &priv
->state
) &&
182 !test_and_set_bit(STMMAC_SERVICE_SCHED
, &priv
->state
))
183 queue_work(priv
->wq
, &priv
->service_task
);
186 static void stmmac_global_err(struct stmmac_priv
*priv
)
188 netif_carrier_off(priv
->dev
);
189 set_bit(STMMAC_RESET_REQUESTED
, &priv
->state
);
190 stmmac_service_event_schedule(priv
);
194 * stmmac_clk_csr_set - dynamically set the MDC clock
195 * @priv: driver private structure
196 * Description: this is to dynamically set the MDC clock according to the csr
199 * If a specific clk_csr value is passed from the platform
200 * this means that the CSR Clock Range selection cannot be
201 * changed at run-time and it is fixed (as reported in the driver
202 * documentation). Viceversa the driver will try to set the MDC
203 * clock dynamically according to the actual clock input.
205 static void stmmac_clk_csr_set(struct stmmac_priv
*priv
)
209 clk_rate
= clk_get_rate(priv
->plat
->stmmac_clk
);
211 /* Platform provided default clk_csr would be assumed valid
212 * for all other cases except for the below mentioned ones.
213 * For values higher than the IEEE 802.3 specified frequency
214 * we can not estimate the proper divider as it is not known
215 * the frequency of clk_csr_i. So we do not change the default
218 if (!(priv
->clk_csr
& MAC_CSR_H_FRQ_MASK
)) {
219 if (clk_rate
< CSR_F_35M
)
220 priv
->clk_csr
= STMMAC_CSR_20_35M
;
221 else if ((clk_rate
>= CSR_F_35M
) && (clk_rate
< CSR_F_60M
))
222 priv
->clk_csr
= STMMAC_CSR_35_60M
;
223 else if ((clk_rate
>= CSR_F_60M
) && (clk_rate
< CSR_F_100M
))
224 priv
->clk_csr
= STMMAC_CSR_60_100M
;
225 else if ((clk_rate
>= CSR_F_100M
) && (clk_rate
< CSR_F_150M
))
226 priv
->clk_csr
= STMMAC_CSR_100_150M
;
227 else if ((clk_rate
>= CSR_F_150M
) && (clk_rate
< CSR_F_250M
))
228 priv
->clk_csr
= STMMAC_CSR_150_250M
;
229 else if ((clk_rate
>= CSR_F_250M
) && (clk_rate
< CSR_F_300M
))
230 priv
->clk_csr
= STMMAC_CSR_250_300M
;
233 if (priv
->plat
->has_sun8i
) {
234 if (clk_rate
> 160000000)
235 priv
->clk_csr
= 0x03;
236 else if (clk_rate
> 80000000)
237 priv
->clk_csr
= 0x02;
238 else if (clk_rate
> 40000000)
239 priv
->clk_csr
= 0x01;
244 if (priv
->plat
->has_xgmac
) {
245 if (clk_rate
> 400000000)
247 else if (clk_rate
> 350000000)
249 else if (clk_rate
> 300000000)
251 else if (clk_rate
> 250000000)
253 else if (clk_rate
> 150000000)
260 static void print_pkt(unsigned char *buf
, int len
)
262 pr_debug("len = %d byte, buf addr: 0x%p\n", len
, buf
);
263 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET
, buf
, len
);
266 static inline u32
stmmac_tx_avail(struct stmmac_priv
*priv
, u32 queue
)
268 struct stmmac_tx_queue
*tx_q
= &priv
->tx_queue
[queue
];
271 if (tx_q
->dirty_tx
> tx_q
->cur_tx
)
272 avail
= tx_q
->dirty_tx
- tx_q
->cur_tx
- 1;
274 avail
= priv
->dma_tx_size
- tx_q
->cur_tx
+ tx_q
->dirty_tx
- 1;
280 * stmmac_rx_dirty - Get RX queue dirty
281 * @priv: driver private structure
282 * @queue: RX queue index
284 static inline u32
stmmac_rx_dirty(struct stmmac_priv
*priv
, u32 queue
)
286 struct stmmac_rx_queue
*rx_q
= &priv
->rx_queue
[queue
];
289 if (rx_q
->dirty_rx
<= rx_q
->cur_rx
)
290 dirty
= rx_q
->cur_rx
- rx_q
->dirty_rx
;
292 dirty
= priv
->dma_rx_size
- rx_q
->dirty_rx
+ rx_q
->cur_rx
;
297 static void stmmac_lpi_entry_timer_config(struct stmmac_priv
*priv
, bool en
)
301 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
302 priv
->eee_sw_timer_en
= en
? 0 : 1;
303 tx_lpi_timer
= en
? priv
->tx_lpi_timer
: 0;
304 stmmac_set_eee_lpi_timer(priv
, priv
->hw
, tx_lpi_timer
);
308 * stmmac_enable_eee_mode - check and enter in LPI mode
309 * @priv: driver private structure
310 * Description: this function is to verify and enter in LPI mode in case of
313 static void stmmac_enable_eee_mode(struct stmmac_priv
*priv
)
315 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
318 /* check if all TX queues have the work finished */
319 for (queue
= 0; queue
< tx_cnt
; queue
++) {
320 struct stmmac_tx_queue
*tx_q
= &priv
->tx_queue
[queue
];
322 if (tx_q
->dirty_tx
!= tx_q
->cur_tx
)
323 return; /* still unfinished work */
326 /* Check and enter in LPI mode */
327 if (!priv
->tx_path_in_lpi_mode
)
328 stmmac_set_eee_mode(priv
, priv
->hw
,
329 priv
->plat
->en_tx_lpi_clockgating
);
333 * stmmac_disable_eee_mode - disable and exit from LPI mode
334 * @priv: driver private structure
335 * Description: this function is to exit and disable EEE in case of
336 * LPI state is true. This is called by the xmit.
338 void stmmac_disable_eee_mode(struct stmmac_priv
*priv
)
340 if (!priv
->eee_sw_timer_en
) {
341 stmmac_lpi_entry_timer_config(priv
, 0);
345 stmmac_reset_eee_mode(priv
, priv
->hw
);
346 del_timer_sync(&priv
->eee_ctrl_timer
);
347 priv
->tx_path_in_lpi_mode
= false;
351 * stmmac_eee_ctrl_timer - EEE TX SW timer.
352 * @t: timer_list struct containing private info
354 * if there is no data transfer and if we are not in LPI state,
355 * then MAC Transmitter can be moved to LPI state.
357 static void stmmac_eee_ctrl_timer(struct timer_list
*t
)
359 struct stmmac_priv
*priv
= from_timer(priv
, t
, eee_ctrl_timer
);
361 stmmac_enable_eee_mode(priv
);
362 mod_timer(&priv
->eee_ctrl_timer
, STMMAC_LPI_T(priv
->tx_lpi_timer
));
366 * stmmac_eee_init - init EEE
367 * @priv: driver private structure
369 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
370 * can also manage EEE, this function enable the LPI state and start related
373 bool stmmac_eee_init(struct stmmac_priv
*priv
)
375 int eee_tw_timer
= priv
->eee_tw_timer
;
377 /* Using PCS we cannot dial with the phy registers at this stage
378 * so we do not support extra feature like EEE.
380 if (priv
->hw
->pcs
== STMMAC_PCS_TBI
||
381 priv
->hw
->pcs
== STMMAC_PCS_RTBI
)
384 /* Check if MAC core supports the EEE feature. */
385 if (!priv
->dma_cap
.eee
)
388 mutex_lock(&priv
->lock
);
390 /* Check if it needs to be deactivated */
391 if (!priv
->eee_active
) {
392 if (priv
->eee_enabled
) {
393 netdev_dbg(priv
->dev
, "disable EEE\n");
394 stmmac_lpi_entry_timer_config(priv
, 0);
395 del_timer_sync(&priv
->eee_ctrl_timer
);
396 stmmac_set_eee_timer(priv
, priv
->hw
, 0, eee_tw_timer
);
398 mutex_unlock(&priv
->lock
);
402 if (priv
->eee_active
&& !priv
->eee_enabled
) {
403 timer_setup(&priv
->eee_ctrl_timer
, stmmac_eee_ctrl_timer
, 0);
404 stmmac_set_eee_timer(priv
, priv
->hw
, STMMAC_DEFAULT_LIT_LS
,
408 if (priv
->plat
->has_gmac4
&& priv
->tx_lpi_timer
<= STMMAC_ET_MAX
) {
409 del_timer_sync(&priv
->eee_ctrl_timer
);
410 priv
->tx_path_in_lpi_mode
= false;
411 stmmac_lpi_entry_timer_config(priv
, 1);
413 stmmac_lpi_entry_timer_config(priv
, 0);
414 mod_timer(&priv
->eee_ctrl_timer
,
415 STMMAC_LPI_T(priv
->tx_lpi_timer
));
418 mutex_unlock(&priv
->lock
);
419 netdev_dbg(priv
->dev
, "Energy-Efficient Ethernet initialized\n");
423 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
424 * @priv: driver private structure
425 * @p : descriptor pointer
426 * @skb : the socket buffer
428 * This function will read timestamp from the descriptor & pass it to stack.
429 * and also perform some sanity checks.
431 static void stmmac_get_tx_hwtstamp(struct stmmac_priv
*priv
,
432 struct dma_desc
*p
, struct sk_buff
*skb
)
434 struct skb_shared_hwtstamps shhwtstamp
;
438 if (!priv
->hwts_tx_en
)
441 /* exit if skb doesn't support hw tstamp */
442 if (likely(!skb
|| !(skb_shinfo(skb
)->tx_flags
& SKBTX_IN_PROGRESS
)))
445 /* check tx tstamp status */
446 if (stmmac_get_tx_timestamp_status(priv
, p
)) {
447 stmmac_get_timestamp(priv
, p
, priv
->adv_ts
, &ns
);
449 } else if (!stmmac_get_mac_tx_timestamp(priv
, priv
->hw
, &ns
)) {
454 memset(&shhwtstamp
, 0, sizeof(struct skb_shared_hwtstamps
));
455 shhwtstamp
.hwtstamp
= ns_to_ktime(ns
);
457 netdev_dbg(priv
->dev
, "get valid TX hw timestamp %llu\n", ns
);
458 /* pass tstamp to stack */
459 skb_tstamp_tx(skb
, &shhwtstamp
);
463 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
464 * @priv: driver private structure
465 * @p : descriptor pointer
466 * @np : next descriptor pointer
467 * @skb : the socket buffer
469 * This function will read received packet's timestamp from the descriptor
470 * and pass it to stack. It also perform some sanity checks.
472 static void stmmac_get_rx_hwtstamp(struct stmmac_priv
*priv
, struct dma_desc
*p
,
473 struct dma_desc
*np
, struct sk_buff
*skb
)
475 struct skb_shared_hwtstamps
*shhwtstamp
= NULL
;
476 struct dma_desc
*desc
= p
;
479 if (!priv
->hwts_rx_en
)
481 /* For GMAC4, the valid timestamp is from CTX next desc. */
482 if (priv
->plat
->has_gmac4
|| priv
->plat
->has_xgmac
)
485 /* Check if timestamp is available */
486 if (stmmac_get_rx_timestamp_status(priv
, p
, np
, priv
->adv_ts
)) {
487 stmmac_get_timestamp(priv
, desc
, priv
->adv_ts
, &ns
);
488 netdev_dbg(priv
->dev
, "get valid RX hw timestamp %llu\n", ns
);
489 shhwtstamp
= skb_hwtstamps(skb
);
490 memset(shhwtstamp
, 0, sizeof(struct skb_shared_hwtstamps
));
491 shhwtstamp
->hwtstamp
= ns_to_ktime(ns
);
493 netdev_dbg(priv
->dev
, "cannot get RX hw timestamp\n");
498 * stmmac_hwtstamp_set - control hardware timestamping.
499 * @dev: device pointer.
500 * @ifr: An IOCTL specific structure, that can contain a pointer to
501 * a proprietary structure used to pass information to the driver.
503 * This function configures the MAC to enable/disable both outgoing(TX)
504 * and incoming(RX) packets time stamping based on user input.
506 * 0 on success and an appropriate -ve integer on failure.
508 static int stmmac_hwtstamp_set(struct net_device
*dev
, struct ifreq
*ifr
)
510 struct stmmac_priv
*priv
= netdev_priv(dev
);
511 struct hwtstamp_config config
;
512 struct timespec64 now
;
516 u32 ptp_over_ipv4_udp
= 0;
517 u32 ptp_over_ipv6_udp
= 0;
518 u32 ptp_over_ethernet
= 0;
519 u32 snap_type_sel
= 0;
520 u32 ts_master_en
= 0;
526 xmac
= priv
->plat
->has_gmac4
|| priv
->plat
->has_xgmac
;
528 if (!(priv
->dma_cap
.time_stamp
|| priv
->adv_ts
)) {
529 netdev_alert(priv
->dev
, "No support for HW time stamping\n");
530 priv
->hwts_tx_en
= 0;
531 priv
->hwts_rx_en
= 0;
536 if (copy_from_user(&config
, ifr
->ifr_data
,
540 netdev_dbg(priv
->dev
, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
541 __func__
, config
.flags
, config
.tx_type
, config
.rx_filter
);
543 /* reserved for future extensions */
547 if (config
.tx_type
!= HWTSTAMP_TX_OFF
&&
548 config
.tx_type
!= HWTSTAMP_TX_ON
)
552 switch (config
.rx_filter
) {
553 case HWTSTAMP_FILTER_NONE
:
554 /* time stamp no incoming packet at all */
555 config
.rx_filter
= HWTSTAMP_FILTER_NONE
;
558 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
559 /* PTP v1, UDP, any kind of event packet */
560 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V1_L4_EVENT
;
561 /* 'xmac' hardware can support Sync, Pdelay_Req and
562 * Pdelay_resp by setting bit14 and bits17/16 to 01
563 * This leaves Delay_Req timestamps out.
564 * Enable all events *and* general purpose message
567 snap_type_sel
= PTP_TCR_SNAPTYPSEL_1
;
568 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
569 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
572 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
573 /* PTP v1, UDP, Sync packet */
574 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V1_L4_SYNC
;
575 /* take time stamp for SYNC messages only */
576 ts_event_en
= PTP_TCR_TSEVNTENA
;
578 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
579 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
582 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
583 /* PTP v1, UDP, Delay_req packet */
584 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
;
585 /* take time stamp for Delay_Req messages only */
586 ts_master_en
= PTP_TCR_TSMSTRENA
;
587 ts_event_en
= PTP_TCR_TSEVNTENA
;
589 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
590 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
593 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
594 /* PTP v2, UDP, any kind of event packet */
595 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_L4_EVENT
;
596 ptp_v2
= PTP_TCR_TSVER2ENA
;
597 /* take time stamp for all event messages */
598 snap_type_sel
= PTP_TCR_SNAPTYPSEL_1
;
600 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
601 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
604 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
605 /* PTP v2, UDP, Sync packet */
606 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_L4_SYNC
;
607 ptp_v2
= PTP_TCR_TSVER2ENA
;
608 /* take time stamp for SYNC messages only */
609 ts_event_en
= PTP_TCR_TSEVNTENA
;
611 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
612 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
615 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
616 /* PTP v2, UDP, Delay_req packet */
617 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
;
618 ptp_v2
= PTP_TCR_TSVER2ENA
;
619 /* take time stamp for Delay_Req messages only */
620 ts_master_en
= PTP_TCR_TSMSTRENA
;
621 ts_event_en
= PTP_TCR_TSEVNTENA
;
623 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
624 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
627 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
628 /* PTP v2/802.AS1 any layer, any kind of event packet */
629 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_EVENT
;
630 ptp_v2
= PTP_TCR_TSVER2ENA
;
631 snap_type_sel
= PTP_TCR_SNAPTYPSEL_1
;
632 if (priv
->synopsys_id
!= DWMAC_CORE_5_10
)
633 ts_event_en
= PTP_TCR_TSEVNTENA
;
634 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
635 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
636 ptp_over_ethernet
= PTP_TCR_TSIPENA
;
639 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
640 /* PTP v2/802.AS1, any layer, Sync packet */
641 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_SYNC
;
642 ptp_v2
= PTP_TCR_TSVER2ENA
;
643 /* take time stamp for SYNC messages only */
644 ts_event_en
= PTP_TCR_TSEVNTENA
;
646 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
647 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
648 ptp_over_ethernet
= PTP_TCR_TSIPENA
;
651 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
652 /* PTP v2/802.AS1, any layer, Delay_req packet */
653 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
;
654 ptp_v2
= PTP_TCR_TSVER2ENA
;
655 /* take time stamp for Delay_Req messages only */
656 ts_master_en
= PTP_TCR_TSMSTRENA
;
657 ts_event_en
= PTP_TCR_TSEVNTENA
;
659 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
660 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
661 ptp_over_ethernet
= PTP_TCR_TSIPENA
;
664 case HWTSTAMP_FILTER_NTP_ALL
:
665 case HWTSTAMP_FILTER_ALL
:
666 /* time stamp any incoming packet */
667 config
.rx_filter
= HWTSTAMP_FILTER_ALL
;
668 tstamp_all
= PTP_TCR_TSENALL
;
675 switch (config
.rx_filter
) {
676 case HWTSTAMP_FILTER_NONE
:
677 config
.rx_filter
= HWTSTAMP_FILTER_NONE
;
680 /* PTP v1, UDP, any kind of event packet */
681 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V1_L4_EVENT
;
685 priv
->hwts_rx_en
= ((config
.rx_filter
== HWTSTAMP_FILTER_NONE
) ? 0 : 1);
686 priv
->hwts_tx_en
= config
.tx_type
== HWTSTAMP_TX_ON
;
688 if (!priv
->hwts_tx_en
&& !priv
->hwts_rx_en
)
689 stmmac_config_hw_tstamping(priv
, priv
->ptpaddr
, 0);
691 value
= (PTP_TCR_TSENA
| PTP_TCR_TSCFUPDT
| PTP_TCR_TSCTRLSSR
|
692 tstamp_all
| ptp_v2
| ptp_over_ethernet
|
693 ptp_over_ipv6_udp
| ptp_over_ipv4_udp
| ts_event_en
|
694 ts_master_en
| snap_type_sel
);
695 stmmac_config_hw_tstamping(priv
, priv
->ptpaddr
, value
);
697 /* program Sub Second Increment reg */
698 stmmac_config_sub_second_increment(priv
,
699 priv
->ptpaddr
, priv
->plat
->clk_ptp_rate
,
701 temp
= div_u64(1000000000ULL, sec_inc
);
703 /* Store sub second increment and flags for later use */
704 priv
->sub_second_inc
= sec_inc
;
705 priv
->systime_flags
= value
;
707 /* calculate default added value:
709 * addend = (2^32)/freq_div_ratio;
710 * where, freq_div_ratio = 1e9ns/sec_inc
712 temp
= (u64
)(temp
<< 32);
713 priv
->default_addend
= div_u64(temp
, priv
->plat
->clk_ptp_rate
);
714 stmmac_config_addend(priv
, priv
->ptpaddr
, priv
->default_addend
);
716 /* initialize system time */
717 ktime_get_real_ts64(&now
);
719 /* lower 32 bits of tv_sec are safe until y2106 */
720 stmmac_init_systime(priv
, priv
->ptpaddr
,
721 (u32
)now
.tv_sec
, now
.tv_nsec
);
724 memcpy(&priv
->tstamp_config
, &config
, sizeof(config
));
726 return copy_to_user(ifr
->ifr_data
, &config
,
727 sizeof(config
)) ? -EFAULT
: 0;
731 * stmmac_hwtstamp_get - read hardware timestamping.
732 * @dev: device pointer.
733 * @ifr: An IOCTL specific structure, that can contain a pointer to
734 * a proprietary structure used to pass information to the driver.
736 * This function obtain the current hardware timestamping settings
739 static int stmmac_hwtstamp_get(struct net_device
*dev
, struct ifreq
*ifr
)
741 struct stmmac_priv
*priv
= netdev_priv(dev
);
742 struct hwtstamp_config
*config
= &priv
->tstamp_config
;
744 if (!(priv
->dma_cap
.time_stamp
|| priv
->dma_cap
.atime_stamp
))
747 return copy_to_user(ifr
->ifr_data
, config
,
748 sizeof(*config
)) ? -EFAULT
: 0;
752 * stmmac_init_ptp - init PTP
753 * @priv: driver private structure
754 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
755 * This is done by looking at the HW cap. register.
756 * This function also registers the ptp driver.
758 static int stmmac_init_ptp(struct stmmac_priv
*priv
)
760 bool xmac
= priv
->plat
->has_gmac4
|| priv
->plat
->has_xgmac
;
762 if (!(priv
->dma_cap
.time_stamp
|| priv
->dma_cap
.atime_stamp
))
766 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
767 if (xmac
&& priv
->dma_cap
.atime_stamp
)
769 /* Dwmac 3.x core with extend_desc can support adv_ts */
770 else if (priv
->extend_desc
&& priv
->dma_cap
.atime_stamp
)
773 if (priv
->dma_cap
.time_stamp
)
774 netdev_info(priv
->dev
, "IEEE 1588-2002 Timestamp supported\n");
777 netdev_info(priv
->dev
,
778 "IEEE 1588-2008 Advanced Timestamp supported\n");
780 priv
->hwts_tx_en
= 0;
781 priv
->hwts_rx_en
= 0;
783 stmmac_ptp_register(priv
);
788 static void stmmac_release_ptp(struct stmmac_priv
*priv
)
790 clk_disable_unprepare(priv
->plat
->clk_ptp_ref
);
791 stmmac_ptp_unregister(priv
);
795 * stmmac_mac_flow_ctrl - Configure flow control in all queues
796 * @priv: driver private structure
797 * @duplex: duplex passed to the next function
798 * Description: It is used for configuring the flow control in all queues
800 static void stmmac_mac_flow_ctrl(struct stmmac_priv
*priv
, u32 duplex
)
802 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
804 stmmac_flow_ctrl(priv
, priv
->hw
, duplex
, priv
->flow_ctrl
,
805 priv
->pause
, tx_cnt
);
808 static void stmmac_validate(struct phylink_config
*config
,
809 unsigned long *supported
,
810 struct phylink_link_state
*state
)
812 struct stmmac_priv
*priv
= netdev_priv(to_net_dev(config
->dev
));
813 __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported
) = { 0, };
814 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask
) = { 0, };
815 int tx_cnt
= priv
->plat
->tx_queues_to_use
;
816 int max_speed
= priv
->plat
->max_speed
;
818 phylink_set(mac_supported
, 10baseT_Half
);
819 phylink_set(mac_supported
, 10baseT_Full
);
820 phylink_set(mac_supported
, 100baseT_Half
);
821 phylink_set(mac_supported
, 100baseT_Full
);
822 phylink_set(mac_supported
, 1000baseT_Half
);
823 phylink_set(mac_supported
, 1000baseT_Full
);
824 phylink_set(mac_supported
, 1000baseKX_Full
);
826 phylink_set(mac_supported
, Autoneg
);
827 phylink_set(mac_supported
, Pause
);
828 phylink_set(mac_supported
, Asym_Pause
);
829 phylink_set_port_modes(mac_supported
);
831 /* Cut down 1G if asked to */
832 if ((max_speed
> 0) && (max_speed
< 1000)) {
833 phylink_set(mask
, 1000baseT_Full
);
834 phylink_set(mask
, 1000baseX_Full
);
835 } else if (priv
->plat
->has_xgmac
) {
836 if (!max_speed
|| (max_speed
>= 2500)) {
837 phylink_set(mac_supported
, 2500baseT_Full
);
838 phylink_set(mac_supported
, 2500baseX_Full
);
840 if (!max_speed
|| (max_speed
>= 5000)) {
841 phylink_set(mac_supported
, 5000baseT_Full
);
843 if (!max_speed
|| (max_speed
>= 10000)) {
844 phylink_set(mac_supported
, 10000baseSR_Full
);
845 phylink_set(mac_supported
, 10000baseLR_Full
);
846 phylink_set(mac_supported
, 10000baseER_Full
);
847 phylink_set(mac_supported
, 10000baseLRM_Full
);
848 phylink_set(mac_supported
, 10000baseT_Full
);
849 phylink_set(mac_supported
, 10000baseKX4_Full
);
850 phylink_set(mac_supported
, 10000baseKR_Full
);
852 if (!max_speed
|| (max_speed
>= 25000)) {
853 phylink_set(mac_supported
, 25000baseCR_Full
);
854 phylink_set(mac_supported
, 25000baseKR_Full
);
855 phylink_set(mac_supported
, 25000baseSR_Full
);
857 if (!max_speed
|| (max_speed
>= 40000)) {
858 phylink_set(mac_supported
, 40000baseKR4_Full
);
859 phylink_set(mac_supported
, 40000baseCR4_Full
);
860 phylink_set(mac_supported
, 40000baseSR4_Full
);
861 phylink_set(mac_supported
, 40000baseLR4_Full
);
863 if (!max_speed
|| (max_speed
>= 50000)) {
864 phylink_set(mac_supported
, 50000baseCR2_Full
);
865 phylink_set(mac_supported
, 50000baseKR2_Full
);
866 phylink_set(mac_supported
, 50000baseSR2_Full
);
867 phylink_set(mac_supported
, 50000baseKR_Full
);
868 phylink_set(mac_supported
, 50000baseSR_Full
);
869 phylink_set(mac_supported
, 50000baseCR_Full
);
870 phylink_set(mac_supported
, 50000baseLR_ER_FR_Full
);
871 phylink_set(mac_supported
, 50000baseDR_Full
);
873 if (!max_speed
|| (max_speed
>= 100000)) {
874 phylink_set(mac_supported
, 100000baseKR4_Full
);
875 phylink_set(mac_supported
, 100000baseSR4_Full
);
876 phylink_set(mac_supported
, 100000baseCR4_Full
);
877 phylink_set(mac_supported
, 100000baseLR4_ER4_Full
);
878 phylink_set(mac_supported
, 100000baseKR2_Full
);
879 phylink_set(mac_supported
, 100000baseSR2_Full
);
880 phylink_set(mac_supported
, 100000baseCR2_Full
);
881 phylink_set(mac_supported
, 100000baseLR2_ER2_FR2_Full
);
882 phylink_set(mac_supported
, 100000baseDR2_Full
);
886 /* Half-Duplex can only work with single queue */
888 phylink_set(mask
, 10baseT_Half
);
889 phylink_set(mask
, 100baseT_Half
);
890 phylink_set(mask
, 1000baseT_Half
);
893 linkmode_and(supported
, supported
, mac_supported
);
894 linkmode_andnot(supported
, supported
, mask
);
896 linkmode_and(state
->advertising
, state
->advertising
, mac_supported
);
897 linkmode_andnot(state
->advertising
, state
->advertising
, mask
);
899 /* If PCS is supported, check which modes it supports. */
900 stmmac_xpcs_validate(priv
, &priv
->hw
->xpcs_args
, supported
, state
);
903 static void stmmac_mac_pcs_get_state(struct phylink_config
*config
,
904 struct phylink_link_state
*state
)
906 struct stmmac_priv
*priv
= netdev_priv(to_net_dev(config
->dev
));
909 stmmac_xpcs_get_state(priv
, &priv
->hw
->xpcs_args
, state
);
912 static void stmmac_mac_config(struct phylink_config
*config
, unsigned int mode
,
913 const struct phylink_link_state
*state
)
915 struct stmmac_priv
*priv
= netdev_priv(to_net_dev(config
->dev
));
917 stmmac_xpcs_config(priv
, &priv
->hw
->xpcs_args
, state
);
920 static void stmmac_mac_an_restart(struct phylink_config
*config
)
925 static void stmmac_mac_link_down(struct phylink_config
*config
,
926 unsigned int mode
, phy_interface_t interface
)
928 struct stmmac_priv
*priv
= netdev_priv(to_net_dev(config
->dev
));
930 stmmac_mac_set(priv
, priv
->ioaddr
, false);
931 priv
->eee_active
= false;
932 priv
->tx_lpi_enabled
= false;
933 stmmac_eee_init(priv
);
934 stmmac_set_eee_pls(priv
, priv
->hw
, false);
937 static void stmmac_mac_link_up(struct phylink_config
*config
,
938 struct phy_device
*phy
,
939 unsigned int mode
, phy_interface_t interface
,
940 int speed
, int duplex
,
941 bool tx_pause
, bool rx_pause
)
943 struct stmmac_priv
*priv
= netdev_priv(to_net_dev(config
->dev
));
946 stmmac_xpcs_link_up(priv
, &priv
->hw
->xpcs_args
, speed
, interface
);
948 ctrl
= readl(priv
->ioaddr
+ MAC_CTRL_REG
);
949 ctrl
&= ~priv
->hw
->link
.speed_mask
;
951 if (interface
== PHY_INTERFACE_MODE_USXGMII
) {
954 ctrl
|= priv
->hw
->link
.xgmii
.speed10000
;
957 ctrl
|= priv
->hw
->link
.xgmii
.speed5000
;
960 ctrl
|= priv
->hw
->link
.xgmii
.speed2500
;
965 } else if (interface
== PHY_INTERFACE_MODE_XLGMII
) {
968 ctrl
|= priv
->hw
->link
.xlgmii
.speed100000
;
971 ctrl
|= priv
->hw
->link
.xlgmii
.speed50000
;
974 ctrl
|= priv
->hw
->link
.xlgmii
.speed40000
;
977 ctrl
|= priv
->hw
->link
.xlgmii
.speed25000
;
980 ctrl
|= priv
->hw
->link
.xgmii
.speed10000
;
983 ctrl
|= priv
->hw
->link
.speed2500
;
986 ctrl
|= priv
->hw
->link
.speed1000
;
994 ctrl
|= priv
->hw
->link
.speed2500
;
997 ctrl
|= priv
->hw
->link
.speed1000
;
1000 ctrl
|= priv
->hw
->link
.speed100
;
1003 ctrl
|= priv
->hw
->link
.speed10
;
1010 priv
->speed
= speed
;
1012 if (priv
->plat
->fix_mac_speed
)
1013 priv
->plat
->fix_mac_speed(priv
->plat
->bsp_priv
, speed
);
1016 ctrl
&= ~priv
->hw
->link
.duplex
;
1018 ctrl
|= priv
->hw
->link
.duplex
;
1020 /* Flow Control operation */
1021 if (tx_pause
&& rx_pause
)
1022 stmmac_mac_flow_ctrl(priv
, duplex
);
1024 writel(ctrl
, priv
->ioaddr
+ MAC_CTRL_REG
);
1026 stmmac_mac_set(priv
, priv
->ioaddr
, true);
1027 if (phy
&& priv
->dma_cap
.eee
) {
1028 priv
->eee_active
= phy_init_eee(phy
, 1) >= 0;
1029 priv
->eee_enabled
= stmmac_eee_init(priv
);
1030 priv
->tx_lpi_enabled
= priv
->eee_enabled
;
1031 stmmac_set_eee_pls(priv
, priv
->hw
, true);
1035 static const struct phylink_mac_ops stmmac_phylink_mac_ops
= {
1036 .validate
= stmmac_validate
,
1037 .mac_pcs_get_state
= stmmac_mac_pcs_get_state
,
1038 .mac_config
= stmmac_mac_config
,
1039 .mac_an_restart
= stmmac_mac_an_restart
,
1040 .mac_link_down
= stmmac_mac_link_down
,
1041 .mac_link_up
= stmmac_mac_link_up
,
1045 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1046 * @priv: driver private structure
1047 * Description: this is to verify if the HW supports the PCS.
1048 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1049 * configured for the TBI, RTBI, or SGMII PHY interface.
1051 static void stmmac_check_pcs_mode(struct stmmac_priv
*priv
)
1053 int interface
= priv
->plat
->interface
;
1055 if (priv
->dma_cap
.pcs
) {
1056 if ((interface
== PHY_INTERFACE_MODE_RGMII
) ||
1057 (interface
== PHY_INTERFACE_MODE_RGMII_ID
) ||
1058 (interface
== PHY_INTERFACE_MODE_RGMII_RXID
) ||
1059 (interface
== PHY_INTERFACE_MODE_RGMII_TXID
)) {
1060 netdev_dbg(priv
->dev
, "PCS RGMII support enabled\n");
1061 priv
->hw
->pcs
= STMMAC_PCS_RGMII
;
1062 } else if (interface
== PHY_INTERFACE_MODE_SGMII
) {
1063 netdev_dbg(priv
->dev
, "PCS SGMII support enabled\n");
1064 priv
->hw
->pcs
= STMMAC_PCS_SGMII
;
1070 * stmmac_init_phy - PHY initialization
1071 * @dev: net device structure
1072 * Description: it initializes the driver's PHY state, and attaches the PHY
1073 * to the mac driver.
1077 static int stmmac_init_phy(struct net_device
*dev
)
1079 struct ethtool_wolinfo wol
= { .cmd
= ETHTOOL_GWOL
};
1080 struct stmmac_priv
*priv
= netdev_priv(dev
);
1081 struct device_node
*node
;
1084 node
= priv
->plat
->phylink_node
;
1087 ret
= phylink_of_phy_connect(priv
->phylink
, node
, 0);
1089 /* Some DT bindings do not set-up the PHY handle. Let's try to
1093 int addr
= priv
->plat
->phy_addr
;
1094 struct phy_device
*phydev
;
1096 phydev
= mdiobus_get_phy(priv
->mii
, addr
);
1098 netdev_err(priv
->dev
, "no phy at addr %d\n", addr
);
1102 ret
= phylink_connect_phy(priv
->phylink
, phydev
);
1105 phylink_ethtool_get_wol(priv
->phylink
, &wol
);
1106 device_set_wakeup_capable(priv
->device
, !!wol
.supported
);
1111 static int stmmac_phy_setup(struct stmmac_priv
*priv
)
1113 struct fwnode_handle
*fwnode
= of_fwnode_handle(priv
->plat
->phylink_node
);
1114 int mode
= priv
->plat
->phy_interface
;
1115 struct phylink
*phylink
;
1117 priv
->phylink_config
.dev
= &priv
->dev
->dev
;
1118 priv
->phylink_config
.type
= PHYLINK_NETDEV
;
1119 priv
->phylink_config
.pcs_poll
= true;
1122 fwnode
= dev_fwnode(priv
->device
);
1124 phylink
= phylink_create(&priv
->phylink_config
, fwnode
,
1125 mode
, &stmmac_phylink_mac_ops
);
1126 if (IS_ERR(phylink
))
1127 return PTR_ERR(phylink
);
1129 priv
->phylink
= phylink
;
1133 static void stmmac_display_rx_rings(struct stmmac_priv
*priv
)
1135 u32 rx_cnt
= priv
->plat
->rx_queues_to_use
;
1139 /* Display RX rings */
1140 for (queue
= 0; queue
< rx_cnt
; queue
++) {
1141 struct stmmac_rx_queue
*rx_q
= &priv
->rx_queue
[queue
];
1143 pr_info("\tRX Queue %u rings\n", queue
);
1145 if (priv
->extend_desc
)
1146 head_rx
= (void *)rx_q
->dma_erx
;
1148 head_rx
= (void *)rx_q
->dma_rx
;
1150 /* Display RX ring */
1151 stmmac_display_ring(priv
, head_rx
, priv
->dma_rx_size
, true);
1155 static void stmmac_display_tx_rings(struct stmmac_priv
*priv
)
1157 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
1161 /* Display TX rings */
1162 for (queue
= 0; queue
< tx_cnt
; queue
++) {
1163 struct stmmac_tx_queue
*tx_q
= &priv
->tx_queue
[queue
];
1165 pr_info("\tTX Queue %d rings\n", queue
);
1167 if (priv
->extend_desc
)
1168 head_tx
= (void *)tx_q
->dma_etx
;
1169 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
1170 head_tx
= (void *)tx_q
->dma_entx
;
1172 head_tx
= (void *)tx_q
->dma_tx
;
1174 stmmac_display_ring(priv
, head_tx
, priv
->dma_tx_size
, false);
1178 static void stmmac_display_rings(struct stmmac_priv
*priv
)
1180 /* Display RX ring */
1181 stmmac_display_rx_rings(priv
);
1183 /* Display TX ring */
1184 stmmac_display_tx_rings(priv
);
1187 static int stmmac_set_bfsize(int mtu
, int bufsize
)
1191 if (mtu
>= BUF_SIZE_8KiB
)
1192 ret
= BUF_SIZE_16KiB
;
1193 else if (mtu
>= BUF_SIZE_4KiB
)
1194 ret
= BUF_SIZE_8KiB
;
1195 else if (mtu
>= BUF_SIZE_2KiB
)
1196 ret
= BUF_SIZE_4KiB
;
1197 else if (mtu
> DEFAULT_BUFSIZE
)
1198 ret
= BUF_SIZE_2KiB
;
1200 ret
= DEFAULT_BUFSIZE
;
1206 * stmmac_clear_rx_descriptors - clear RX descriptors
1207 * @priv: driver private structure
1208 * @queue: RX queue index
1209 * Description: this function is called to clear the RX descriptors
1210 * in case of both basic and extended descriptors are used.
1212 static void stmmac_clear_rx_descriptors(struct stmmac_priv
*priv
, u32 queue
)
1214 struct stmmac_rx_queue
*rx_q
= &priv
->rx_queue
[queue
];
1217 /* Clear the RX descriptors */
1218 for (i
= 0; i
< priv
->dma_rx_size
; i
++)
1219 if (priv
->extend_desc
)
1220 stmmac_init_rx_desc(priv
, &rx_q
->dma_erx
[i
].basic
,
1221 priv
->use_riwt
, priv
->mode
,
1222 (i
== priv
->dma_rx_size
- 1),
1225 stmmac_init_rx_desc(priv
, &rx_q
->dma_rx
[i
],
1226 priv
->use_riwt
, priv
->mode
,
1227 (i
== priv
->dma_rx_size
- 1),
1232 * stmmac_clear_tx_descriptors - clear tx descriptors
1233 * @priv: driver private structure
1234 * @queue: TX queue index.
1235 * Description: this function is called to clear the TX descriptors
1236 * in case of both basic and extended descriptors are used.
1238 static void stmmac_clear_tx_descriptors(struct stmmac_priv
*priv
, u32 queue
)
1240 struct stmmac_tx_queue
*tx_q
= &priv
->tx_queue
[queue
];
1243 /* Clear the TX descriptors */
1244 for (i
= 0; i
< priv
->dma_tx_size
; i
++) {
1245 int last
= (i
== (priv
->dma_tx_size
- 1));
1248 if (priv
->extend_desc
)
1249 p
= &tx_q
->dma_etx
[i
].basic
;
1250 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
1251 p
= &tx_q
->dma_entx
[i
].basic
;
1253 p
= &tx_q
->dma_tx
[i
];
1255 stmmac_init_tx_desc(priv
, p
, priv
->mode
, last
);
1260 * stmmac_clear_descriptors - clear descriptors
1261 * @priv: driver private structure
1262 * Description: this function is called to clear the TX and RX descriptors
1263 * in case of both basic and extended descriptors are used.
1265 static void stmmac_clear_descriptors(struct stmmac_priv
*priv
)
1267 u32 rx_queue_cnt
= priv
->plat
->rx_queues_to_use
;
1268 u32 tx_queue_cnt
= priv
->plat
->tx_queues_to_use
;
1271 /* Clear the RX descriptors */
1272 for (queue
= 0; queue
< rx_queue_cnt
; queue
++)
1273 stmmac_clear_rx_descriptors(priv
, queue
);
1275 /* Clear the TX descriptors */
1276 for (queue
= 0; queue
< tx_queue_cnt
; queue
++)
1277 stmmac_clear_tx_descriptors(priv
, queue
);
1281 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1282 * @priv: driver private structure
1283 * @p: descriptor pointer
1284 * @i: descriptor index
1286 * @queue: RX queue index
1287 * Description: this function is called to allocate a receive buffer, perform
1288 * the DMA mapping and init the descriptor.
1290 static int stmmac_init_rx_buffers(struct stmmac_priv
*priv
, struct dma_desc
*p
,
1291 int i
, gfp_t flags
, u32 queue
)
1293 struct stmmac_rx_queue
*rx_q
= &priv
->rx_queue
[queue
];
1294 struct stmmac_rx_buffer
*buf
= &rx_q
->buf_pool
[i
];
1296 buf
->page
= page_pool_dev_alloc_pages(rx_q
->page_pool
);
1301 buf
->sec_page
= page_pool_dev_alloc_pages(rx_q
->page_pool
);
1305 buf
->sec_addr
= page_pool_get_dma_addr(buf
->sec_page
);
1306 stmmac_set_desc_sec_addr(priv
, p
, buf
->sec_addr
);
1308 buf
->sec_page
= NULL
;
1311 buf
->addr
= page_pool_get_dma_addr(buf
->page
);
1312 stmmac_set_desc_addr(priv
, p
, buf
->addr
);
1313 if (priv
->dma_buf_sz
== BUF_SIZE_16KiB
)
1314 stmmac_init_desc3(priv
, p
);
1320 * stmmac_free_rx_buffer - free RX dma buffers
1321 * @priv: private structure
1322 * @queue: RX queue index
1325 static void stmmac_free_rx_buffer(struct stmmac_priv
*priv
, u32 queue
, int i
)
1327 struct stmmac_rx_queue
*rx_q
= &priv
->rx_queue
[queue
];
1328 struct stmmac_rx_buffer
*buf
= &rx_q
->buf_pool
[i
];
1331 page_pool_put_full_page(rx_q
->page_pool
, buf
->page
, false);
1335 page_pool_put_full_page(rx_q
->page_pool
, buf
->sec_page
, false);
1336 buf
->sec_page
= NULL
;
1340 * stmmac_free_tx_buffer - free RX dma buffers
1341 * @priv: private structure
1342 * @queue: RX queue index
1345 static void stmmac_free_tx_buffer(struct stmmac_priv
*priv
, u32 queue
, int i
)
1347 struct stmmac_tx_queue
*tx_q
= &priv
->tx_queue
[queue
];
1349 if (tx_q
->tx_skbuff_dma
[i
].buf
) {
1350 if (tx_q
->tx_skbuff_dma
[i
].map_as_page
)
1351 dma_unmap_page(priv
->device
,
1352 tx_q
->tx_skbuff_dma
[i
].buf
,
1353 tx_q
->tx_skbuff_dma
[i
].len
,
1356 dma_unmap_single(priv
->device
,
1357 tx_q
->tx_skbuff_dma
[i
].buf
,
1358 tx_q
->tx_skbuff_dma
[i
].len
,
1362 if (tx_q
->tx_skbuff
[i
]) {
1363 dev_kfree_skb_any(tx_q
->tx_skbuff
[i
]);
1364 tx_q
->tx_skbuff
[i
] = NULL
;
1365 tx_q
->tx_skbuff_dma
[i
].buf
= 0;
1366 tx_q
->tx_skbuff_dma
[i
].map_as_page
= false;
1371 * init_dma_rx_desc_rings - init the RX descriptor rings
1372 * @dev: net device structure
1374 * Description: this function initializes the DMA RX descriptors
1375 * and allocates the socket buffers. It supports the chained and ring
1378 static int init_dma_rx_desc_rings(struct net_device
*dev
, gfp_t flags
)
1380 struct stmmac_priv
*priv
= netdev_priv(dev
);
1381 u32 rx_count
= priv
->plat
->rx_queues_to_use
;
1386 /* RX INITIALIZATION */
1387 netif_dbg(priv
, probe
, priv
->dev
,
1388 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1390 for (queue
= 0; queue
< rx_count
; queue
++) {
1391 struct stmmac_rx_queue
*rx_q
= &priv
->rx_queue
[queue
];
1393 netif_dbg(priv
, probe
, priv
->dev
,
1394 "(%s) dma_rx_phy=0x%08x\n", __func__
,
1395 (u32
)rx_q
->dma_rx_phy
);
1397 stmmac_clear_rx_descriptors(priv
, queue
);
1399 for (i
= 0; i
< priv
->dma_rx_size
; i
++) {
1402 if (priv
->extend_desc
)
1403 p
= &((rx_q
->dma_erx
+ i
)->basic
);
1405 p
= rx_q
->dma_rx
+ i
;
1407 ret
= stmmac_init_rx_buffers(priv
, p
, i
, flags
,
1410 goto err_init_rx_buffers
;
1414 rx_q
->dirty_rx
= (unsigned int)(i
- priv
->dma_rx_size
);
1416 /* Setup the chained descriptor addresses */
1417 if (priv
->mode
== STMMAC_CHAIN_MODE
) {
1418 if (priv
->extend_desc
)
1419 stmmac_mode_init(priv
, rx_q
->dma_erx
,
1421 priv
->dma_rx_size
, 1);
1423 stmmac_mode_init(priv
, rx_q
->dma_rx
,
1425 priv
->dma_rx_size
, 0);
1431 err_init_rx_buffers
:
1432 while (queue
>= 0) {
1434 stmmac_free_rx_buffer(priv
, queue
, i
);
1439 i
= priv
->dma_rx_size
;
1447 * init_dma_tx_desc_rings - init the TX descriptor rings
1448 * @dev: net device structure.
1449 * Description: this function initializes the DMA TX descriptors
1450 * and allocates the socket buffers. It supports the chained and ring
1453 static int init_dma_tx_desc_rings(struct net_device
*dev
)
1455 struct stmmac_priv
*priv
= netdev_priv(dev
);
1456 u32 tx_queue_cnt
= priv
->plat
->tx_queues_to_use
;
1460 for (queue
= 0; queue
< tx_queue_cnt
; queue
++) {
1461 struct stmmac_tx_queue
*tx_q
= &priv
->tx_queue
[queue
];
1463 netif_dbg(priv
, probe
, priv
->dev
,
1464 "(%s) dma_tx_phy=0x%08x\n", __func__
,
1465 (u32
)tx_q
->dma_tx_phy
);
1467 /* Setup the chained descriptor addresses */
1468 if (priv
->mode
== STMMAC_CHAIN_MODE
) {
1469 if (priv
->extend_desc
)
1470 stmmac_mode_init(priv
, tx_q
->dma_etx
,
1472 priv
->dma_tx_size
, 1);
1473 else if (!(tx_q
->tbs
& STMMAC_TBS_AVAIL
))
1474 stmmac_mode_init(priv
, tx_q
->dma_tx
,
1476 priv
->dma_tx_size
, 0);
1479 for (i
= 0; i
< priv
->dma_tx_size
; i
++) {
1481 if (priv
->extend_desc
)
1482 p
= &((tx_q
->dma_etx
+ i
)->basic
);
1483 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
1484 p
= &((tx_q
->dma_entx
+ i
)->basic
);
1486 p
= tx_q
->dma_tx
+ i
;
1488 stmmac_clear_desc(priv
, p
);
1490 tx_q
->tx_skbuff_dma
[i
].buf
= 0;
1491 tx_q
->tx_skbuff_dma
[i
].map_as_page
= false;
1492 tx_q
->tx_skbuff_dma
[i
].len
= 0;
1493 tx_q
->tx_skbuff_dma
[i
].last_segment
= false;
1494 tx_q
->tx_skbuff
[i
] = NULL
;
1501 netdev_tx_reset_queue(netdev_get_tx_queue(priv
->dev
, queue
));
1508 * init_dma_desc_rings - init the RX/TX descriptor rings
1509 * @dev: net device structure
1511 * Description: this function initializes the DMA RX/TX descriptors
1512 * and allocates the socket buffers. It supports the chained and ring
1515 static int init_dma_desc_rings(struct net_device
*dev
, gfp_t flags
)
1517 struct stmmac_priv
*priv
= netdev_priv(dev
);
1520 ret
= init_dma_rx_desc_rings(dev
, flags
);
1524 ret
= init_dma_tx_desc_rings(dev
);
1526 stmmac_clear_descriptors(priv
);
1528 if (netif_msg_hw(priv
))
1529 stmmac_display_rings(priv
);
1535 * dma_free_rx_skbufs - free RX dma buffers
1536 * @priv: private structure
1537 * @queue: RX queue index
1539 static void dma_free_rx_skbufs(struct stmmac_priv
*priv
, u32 queue
)
1543 for (i
= 0; i
< priv
->dma_rx_size
; i
++)
1544 stmmac_free_rx_buffer(priv
, queue
, i
);
1548 * dma_free_tx_skbufs - free TX dma buffers
1549 * @priv: private structure
1550 * @queue: TX queue index
1552 static void dma_free_tx_skbufs(struct stmmac_priv
*priv
, u32 queue
)
1556 for (i
= 0; i
< priv
->dma_tx_size
; i
++)
1557 stmmac_free_tx_buffer(priv
, queue
, i
);
1561 * stmmac_free_tx_skbufs - free TX skb buffers
1562 * @priv: private structure
1564 static void stmmac_free_tx_skbufs(struct stmmac_priv
*priv
)
1566 u32 tx_queue_cnt
= priv
->plat
->tx_queues_to_use
;
1569 for (queue
= 0; queue
< tx_queue_cnt
; queue
++)
1570 dma_free_tx_skbufs(priv
, queue
);
1574 * free_dma_rx_desc_resources - free RX dma desc resources
1575 * @priv: private structure
1577 static void free_dma_rx_desc_resources(struct stmmac_priv
*priv
)
1579 u32 rx_count
= priv
->plat
->rx_queues_to_use
;
1582 /* Free RX queue resources */
1583 for (queue
= 0; queue
< rx_count
; queue
++) {
1584 struct stmmac_rx_queue
*rx_q
= &priv
->rx_queue
[queue
];
1586 /* Release the DMA RX socket buffers */
1587 dma_free_rx_skbufs(priv
, queue
);
1589 /* Free DMA regions of consistent memory previously allocated */
1590 if (!priv
->extend_desc
)
1591 dma_free_coherent(priv
->device
, priv
->dma_rx_size
*
1592 sizeof(struct dma_desc
),
1593 rx_q
->dma_rx
, rx_q
->dma_rx_phy
);
1595 dma_free_coherent(priv
->device
, priv
->dma_rx_size
*
1596 sizeof(struct dma_extended_desc
),
1597 rx_q
->dma_erx
, rx_q
->dma_rx_phy
);
1599 kfree(rx_q
->buf_pool
);
1600 if (rx_q
->page_pool
)
1601 page_pool_destroy(rx_q
->page_pool
);
1606 * free_dma_tx_desc_resources - free TX dma desc resources
1607 * @priv: private structure
1609 static void free_dma_tx_desc_resources(struct stmmac_priv
*priv
)
1611 u32 tx_count
= priv
->plat
->tx_queues_to_use
;
1614 /* Free TX queue resources */
1615 for (queue
= 0; queue
< tx_count
; queue
++) {
1616 struct stmmac_tx_queue
*tx_q
= &priv
->tx_queue
[queue
];
1620 /* Release the DMA TX socket buffers */
1621 dma_free_tx_skbufs(priv
, queue
);
1623 if (priv
->extend_desc
) {
1624 size
= sizeof(struct dma_extended_desc
);
1625 addr
= tx_q
->dma_etx
;
1626 } else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
) {
1627 size
= sizeof(struct dma_edesc
);
1628 addr
= tx_q
->dma_entx
;
1630 size
= sizeof(struct dma_desc
);
1631 addr
= tx_q
->dma_tx
;
1634 size
*= priv
->dma_tx_size
;
1636 dma_free_coherent(priv
->device
, size
, addr
, tx_q
->dma_tx_phy
);
1638 kfree(tx_q
->tx_skbuff_dma
);
1639 kfree(tx_q
->tx_skbuff
);
1644 * alloc_dma_rx_desc_resources - alloc RX resources.
1645 * @priv: private structure
1646 * Description: according to which descriptor can be used (extend or basic)
1647 * this function allocates the resources for TX and RX paths. In case of
1648 * reception, for example, it pre-allocated the RX socket buffer in order to
1649 * allow zero-copy mechanism.
1651 static int alloc_dma_rx_desc_resources(struct stmmac_priv
*priv
)
1653 u32 rx_count
= priv
->plat
->rx_queues_to_use
;
1657 /* RX queues buffers and DMA */
1658 for (queue
= 0; queue
< rx_count
; queue
++) {
1659 struct stmmac_rx_queue
*rx_q
= &priv
->rx_queue
[queue
];
1660 struct page_pool_params pp_params
= { 0 };
1661 unsigned int num_pages
;
1663 rx_q
->queue_index
= queue
;
1664 rx_q
->priv_data
= priv
;
1666 pp_params
.flags
= PP_FLAG_DMA_MAP
;
1667 pp_params
.pool_size
= priv
->dma_rx_size
;
1668 num_pages
= DIV_ROUND_UP(priv
->dma_buf_sz
, PAGE_SIZE
);
1669 pp_params
.order
= ilog2(num_pages
);
1670 pp_params
.nid
= dev_to_node(priv
->device
);
1671 pp_params
.dev
= priv
->device
;
1672 pp_params
.dma_dir
= DMA_FROM_DEVICE
;
1674 rx_q
->page_pool
= page_pool_create(&pp_params
);
1675 if (IS_ERR(rx_q
->page_pool
)) {
1676 ret
= PTR_ERR(rx_q
->page_pool
);
1677 rx_q
->page_pool
= NULL
;
1681 rx_q
->buf_pool
= kcalloc(priv
->dma_rx_size
,
1682 sizeof(*rx_q
->buf_pool
),
1684 if (!rx_q
->buf_pool
)
1687 if (priv
->extend_desc
) {
1688 rx_q
->dma_erx
= dma_alloc_coherent(priv
->device
,
1690 sizeof(struct dma_extended_desc
),
1697 rx_q
->dma_rx
= dma_alloc_coherent(priv
->device
,
1699 sizeof(struct dma_desc
),
1710 free_dma_rx_desc_resources(priv
);
1716 * alloc_dma_tx_desc_resources - alloc TX resources.
1717 * @priv: private structure
1718 * Description: according to which descriptor can be used (extend or basic)
1719 * this function allocates the resources for TX and RX paths. In case of
1720 * reception, for example, it pre-allocated the RX socket buffer in order to
1721 * allow zero-copy mechanism.
1723 static int alloc_dma_tx_desc_resources(struct stmmac_priv
*priv
)
1725 u32 tx_count
= priv
->plat
->tx_queues_to_use
;
1729 /* TX queues buffers and DMA */
1730 for (queue
= 0; queue
< tx_count
; queue
++) {
1731 struct stmmac_tx_queue
*tx_q
= &priv
->tx_queue
[queue
];
1735 tx_q
->queue_index
= queue
;
1736 tx_q
->priv_data
= priv
;
1738 tx_q
->tx_skbuff_dma
= kcalloc(priv
->dma_tx_size
,
1739 sizeof(*tx_q
->tx_skbuff_dma
),
1741 if (!tx_q
->tx_skbuff_dma
)
1744 tx_q
->tx_skbuff
= kcalloc(priv
->dma_tx_size
,
1745 sizeof(struct sk_buff
*),
1747 if (!tx_q
->tx_skbuff
)
1750 if (priv
->extend_desc
)
1751 size
= sizeof(struct dma_extended_desc
);
1752 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
1753 size
= sizeof(struct dma_edesc
);
1755 size
= sizeof(struct dma_desc
);
1757 size
*= priv
->dma_tx_size
;
1759 addr
= dma_alloc_coherent(priv
->device
, size
,
1760 &tx_q
->dma_tx_phy
, GFP_KERNEL
);
1764 if (priv
->extend_desc
)
1765 tx_q
->dma_etx
= addr
;
1766 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
1767 tx_q
->dma_entx
= addr
;
1769 tx_q
->dma_tx
= addr
;
1775 free_dma_tx_desc_resources(priv
);
1780 * alloc_dma_desc_resources - alloc TX/RX resources.
1781 * @priv: private structure
1782 * Description: according to which descriptor can be used (extend or basic)
1783 * this function allocates the resources for TX and RX paths. In case of
1784 * reception, for example, it pre-allocated the RX socket buffer in order to
1785 * allow zero-copy mechanism.
1787 static int alloc_dma_desc_resources(struct stmmac_priv
*priv
)
1790 int ret
= alloc_dma_rx_desc_resources(priv
);
1795 ret
= alloc_dma_tx_desc_resources(priv
);
1801 * free_dma_desc_resources - free dma desc resources
1802 * @priv: private structure
1804 static void free_dma_desc_resources(struct stmmac_priv
*priv
)
1806 /* Release the DMA RX socket buffers */
1807 free_dma_rx_desc_resources(priv
);
1809 /* Release the DMA TX socket buffers */
1810 free_dma_tx_desc_resources(priv
);
1814 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1815 * @priv: driver private structure
1816 * Description: It is used for enabling the rx queues in the MAC
1818 static void stmmac_mac_enable_rx_queues(struct stmmac_priv
*priv
)
1820 u32 rx_queues_count
= priv
->plat
->rx_queues_to_use
;
1824 for (queue
= 0; queue
< rx_queues_count
; queue
++) {
1825 mode
= priv
->plat
->rx_queues_cfg
[queue
].mode_to_use
;
1826 stmmac_rx_queue_enable(priv
, priv
->hw
, mode
, queue
);
1831 * stmmac_start_rx_dma - start RX DMA channel
1832 * @priv: driver private structure
1833 * @chan: RX channel index
1835 * This starts a RX DMA channel
1837 static void stmmac_start_rx_dma(struct stmmac_priv
*priv
, u32 chan
)
1839 netdev_dbg(priv
->dev
, "DMA RX processes started in channel %d\n", chan
);
1840 stmmac_start_rx(priv
, priv
->ioaddr
, chan
);
1844 * stmmac_start_tx_dma - start TX DMA channel
1845 * @priv: driver private structure
1846 * @chan: TX channel index
1848 * This starts a TX DMA channel
1850 static void stmmac_start_tx_dma(struct stmmac_priv
*priv
, u32 chan
)
1852 netdev_dbg(priv
->dev
, "DMA TX processes started in channel %d\n", chan
);
1853 stmmac_start_tx(priv
, priv
->ioaddr
, chan
);
1857 * stmmac_stop_rx_dma - stop RX DMA channel
1858 * @priv: driver private structure
1859 * @chan: RX channel index
1861 * This stops a RX DMA channel
1863 static void stmmac_stop_rx_dma(struct stmmac_priv
*priv
, u32 chan
)
1865 netdev_dbg(priv
->dev
, "DMA RX processes stopped in channel %d\n", chan
);
1866 stmmac_stop_rx(priv
, priv
->ioaddr
, chan
);
1870 * stmmac_stop_tx_dma - stop TX DMA channel
1871 * @priv: driver private structure
1872 * @chan: TX channel index
1874 * This stops a TX DMA channel
1876 static void stmmac_stop_tx_dma(struct stmmac_priv
*priv
, u32 chan
)
1878 netdev_dbg(priv
->dev
, "DMA TX processes stopped in channel %d\n", chan
);
1879 stmmac_stop_tx(priv
, priv
->ioaddr
, chan
);
1883 * stmmac_start_all_dma - start all RX and TX DMA channels
1884 * @priv: driver private structure
1886 * This starts all the RX and TX DMA channels
1888 static void stmmac_start_all_dma(struct stmmac_priv
*priv
)
1890 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
1891 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
1894 for (chan
= 0; chan
< rx_channels_count
; chan
++)
1895 stmmac_start_rx_dma(priv
, chan
);
1897 for (chan
= 0; chan
< tx_channels_count
; chan
++)
1898 stmmac_start_tx_dma(priv
, chan
);
1902 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1903 * @priv: driver private structure
1905 * This stops the RX and TX DMA channels
1907 static void stmmac_stop_all_dma(struct stmmac_priv
*priv
)
1909 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
1910 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
1913 for (chan
= 0; chan
< rx_channels_count
; chan
++)
1914 stmmac_stop_rx_dma(priv
, chan
);
1916 for (chan
= 0; chan
< tx_channels_count
; chan
++)
1917 stmmac_stop_tx_dma(priv
, chan
);
1921 * stmmac_dma_operation_mode - HW DMA operation mode
1922 * @priv: driver private structure
1923 * Description: it is used for configuring the DMA operation mode register in
1924 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1926 static void stmmac_dma_operation_mode(struct stmmac_priv
*priv
)
1928 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
1929 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
1930 int rxfifosz
= priv
->plat
->rx_fifo_size
;
1931 int txfifosz
= priv
->plat
->tx_fifo_size
;
1938 rxfifosz
= priv
->dma_cap
.rx_fifo_size
;
1940 txfifosz
= priv
->dma_cap
.tx_fifo_size
;
1942 /* Adjust for real per queue fifo size */
1943 rxfifosz
/= rx_channels_count
;
1944 txfifosz
/= tx_channels_count
;
1946 if (priv
->plat
->force_thresh_dma_mode
) {
1949 } else if (priv
->plat
->force_sf_dma_mode
|| priv
->plat
->tx_coe
) {
1951 * In case of GMAC, SF mode can be enabled
1952 * to perform the TX COE in HW. This depends on:
1953 * 1) TX COE if actually supported
1954 * 2) There is no bugged Jumbo frame support
1955 * that needs to not insert csum in the TDES.
1957 txmode
= SF_DMA_MODE
;
1958 rxmode
= SF_DMA_MODE
;
1959 priv
->xstats
.threshold
= SF_DMA_MODE
;
1962 rxmode
= SF_DMA_MODE
;
1965 /* configure all channels */
1966 for (chan
= 0; chan
< rx_channels_count
; chan
++) {
1967 qmode
= priv
->plat
->rx_queues_cfg
[chan
].mode_to_use
;
1969 stmmac_dma_rx_mode(priv
, priv
->ioaddr
, rxmode
, chan
,
1971 stmmac_set_dma_bfsize(priv
, priv
->ioaddr
, priv
->dma_buf_sz
,
1975 for (chan
= 0; chan
< tx_channels_count
; chan
++) {
1976 qmode
= priv
->plat
->tx_queues_cfg
[chan
].mode_to_use
;
1978 stmmac_dma_tx_mode(priv
, priv
->ioaddr
, txmode
, chan
,
/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, count = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	entry = tx_q->dirty_tx;
	while ((entry != tx_q->cur_tx) && (count < budget)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[entry].basic;
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
					  &priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
	    priv->eee_sw_timer_en) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
			      HRTIMER_MODE_REL);

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	return count;
}
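
/* The error path below recovers a wedged TX channel in a fixed order: stop
 * the software queue, halt the channel DMA, drop any in-flight skbs, clear
 * the descriptors and the queue bookkeeping, re-program the channel base
 * address and restart DMA before waking the queue again.
 */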
/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	stmmac_clear_tx_descriptors(priv, chan);
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
/**
 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 *  @priv: driver private structure
 *  @txmode: TX operating mode
 *  @rxmode: RX operating mode
 *  @chan: channel index
 *  Description: it is used for configuring of the DMA operation mode in
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 *  mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
	int ret;

	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
	if (ret && (ret != -EINVAL)) {
		stmmac_global_err(priv);
		return true;
	}

	return false;
}
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan);
	struct stmmac_channel *ch = &priv->channel[chan];
	unsigned long flags;

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(&ch->rx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule_irqoff(&ch->rx_napi);
		}
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
		if (napi_schedule_prep(&ch->tx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule_irqoff(&ch->tx_napi);
		}
	}

	return status;
}
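
/* stmmac_dma_interrupt() below first collects the per-channel IRQ status into
 * a stack array sized for the larger of MTL_MAX_TX_QUEUES/MTL_MAX_RX_QUEUES,
 * then walks the TX channels a second time so that threshold bumping and
 * error recovery happen only after every channel has had its NAPI context
 * scheduled.
 */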
/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc, tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      SF_DMA_MODE,
								      chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}
/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}
/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}
/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid, in case of failures it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		dev_info(priv->device, "device MAC address %pM\n",
			 priv->dev->dev_addr);
	}
}
/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case of these are not passed a default is kept for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (priv->dma_rx_size *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}
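
/* The TX coalescing timer armed below goes through STMMAC_COAL_TIMER(), which
 * converts a microsecond value into a ktime interval.  Purely as an
 * illustration, and assuming tx_coal_timer keeps its usual default of
 * 1000 us, completion cleanup is re-armed roughly 1 ms after the last packet
 * whenever no interrupt-on-completion descriptor fires first.
 */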
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
		      HRTIMER_MODE_REL);
}
/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;

	ch = &priv->channel[tx_q->queue_index];

	if (likely(napi_schedule_prep(&ch->tx_napi))) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(&ch->tx_napi);
	}

	return HRTIMER_NORESTART;
}
/**
 * stmmac_init_coalesce - init mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 chan;

	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	priv->rx_coal_frames = STMMAC_RX_FRAMES;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}
}
static void stmmac_set_rings_length(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* set TX ring length */
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_set_tx_ring_len(priv, priv->ioaddr,
				       (priv->dma_tx_size - 1), chan);

	/* set RX ring length */
	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_set_rx_ring_len(priv, priv->ioaddr,
				       (priv->dma_rx_size - 1), chan);
}
/**
 *  stmmac_set_tx_queue_weight - Set TX queue weight
 *  @priv: driver private structure
 *  Description: It is used for setting TX queues weight
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
	}
}
/**
 *  stmmac_configure_cbs - Configure CBS in TX queue
 *  @priv: driver private structure
 *  Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	/* queue 0 is reserved for legacy traffic */
	for (queue = 1; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		stmmac_config_cbs(priv, priv->hw,
				  priv->plat->tx_queues_cfg[queue].send_slope,
				  priv->plat->tx_queues_cfg[queue].idle_slope,
				  priv->plat->tx_queues_cfg[queue].high_credit,
				  priv->plat->tx_queues_cfg[queue].low_credit,
				  queue);
	}
}
/**
 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 *  @priv: driver private structure
 *  Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
	}
}
/**
 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX Queue Priority
 */
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < rx_queues_count; queue++) {
		if (!priv->plat->rx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->rx_queues_cfg[queue].prio;
		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
	}
}
/**
 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the TX Queue Priority
 */
static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < tx_queues_count; queue++) {
		if (!priv->plat->tx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->tx_queues_cfg[queue].prio;
		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
	}
}
/**
 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX queue routing
 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 packet;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* no specific packet type routing specified for the queue */
		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
			continue;

		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
	}
}
static void stmmac_mac_config_rss(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
		priv->rss.enable = false;
		return;
	}

	if (priv->dev->features & NETIF_F_RXHASH)
		priv->rss.enable = true;
	else
		priv->rss.enable = false;

	stmmac_rss_configure(priv, priv->hw, &priv->rss,
			     priv->plat->rx_queues_to_use);
}
/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configuring MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
					      priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
					      priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
	if (priv->dma_cap.asp) {
		netdev_info(priv->dev, "Enabling Safety Features\n");
		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
	} else {
		netdev_info(priv->dev, "No Safety Features support found\n");
	}
}
/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state because the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL */
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;

	/* Convert the timer from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	if (priv->use_riwt) {
		if (!priv->rx_riwt)
			priv->rx_riwt = DEF_DMA_RIWT;

		ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++)
			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
	}

	/* Enable Split Header */
	if (priv->sph && priv->hw->rx_csum) {
		for (chan = 0; chan < rx_cnt; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
	}

	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	return 0;
}
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
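
/* stmmac_open() below brings the interface up in a fixed sequence: attach the
 * PHY (unless an internal PCS/XPCS is used), size the DMA buffers and rings,
 * allocate and initialize the descriptor rings, program the hardware via
 * stmmac_hw_setup(), start phylink and the coalescing timers, and only then
 * request the IRQ lines and wake the TX queues.  The error labels unwind in
 * reverse order.
 */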
/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int bfsize = 0;
	u32 chan;
	int ret;

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI &&
	    priv->hw->xpcs == NULL) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	if (!priv->dma_tx_size)
		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
	if (!priv->dma_rx_size)
		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;

	/* Earlier check for TBS */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;

		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
	}

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);
	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the IRQ lines */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);

	return 0;

lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	phylink_disconnect_phy(priv->phylink);
	return ret;
}
/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	return 0;
}
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
	return true;
}
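
/* Sizing note (numbers chosen purely for illustration): each TSO payload
 * descriptor can carry at most TSO_MAX_BUFF_SIZE bytes (SZ_16K - 1, i.e.
 * 16383).  A hypothetical 65536-byte payload therefore needs
 * DIV_ROUND_UP(65536, 16383) = 5 payload descriptors, which is the loop that
 * stmmac_tso_allocator() below performs.
 */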
/**
 *  stmmac_tso_allocator - prepare the TSO payload descriptors
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills the descriptors and requests new descriptors
 *  according to the buffer length to fill.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		curr_addr = des + (total_len - tmp_len);
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
					   0, 1,
					   (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
					   0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
/**
 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description: this is the transmit function that is called on TSO frames
 *  (support available on GMAC4 and newer chips).
 *  Diagram below shows the ring programming in case of TSO frames:
 *
 *  First Descriptor
 *   --------
 *   | DES0 |---> buffer1 = L2/L3/L4 header
 *   | DES1 |---> TCP Payload (can continue on next descr...)
 *   | DES2 |---> buffer 1 and 2 len
 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *   --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * mss is fixed when TSO is enabled, so the TDES3 context field is not
 * reprogrammed per packet.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int desc_size, tmp_pay_len = 0, first_tx;
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, tx_packets;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	u8 proto_hdr_len, hdr;
	u32 pay_len, mss;
	dma_addr_t des;
	int i;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Compute header lengths */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
		hdr = sizeof(struct udphdr);
	} else {
		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr = tcp_hdrlen(skb);
	}

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed */
	if (mss != tx_q->mss) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[first_entry].basic;
	else
		desc = &tx_q->dma_tx[first_entry];
	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);

	if (priv->dma_cap.addr64 <= 32) {
		first->des0 = cpu_to_le32(des);

		/* Fill start of payload in buff2 of first descriptor */
		if (pay_len)
			first->des1 = cpu_to_le32(des + proto_hdr_len);

		/* If needed take extra descriptors to fill the remaining payload */
		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
	} else {
		stmmac_set_desc_addr(priv, first, des);
		tmp_pay_len = pay_len;
		des += proto_hdr_len;
		pay_len = 0;
	}

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;

	/* Manage tx mitigation */
	tx_packets = (tx_q->cur_tx + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames)
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames)
		set_ic = true;
	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
				   proto_hdr_len,
				   pay_len,
				   1, tx_q->tx_skbuff_dma[first_entry].last_segment,
				   hdr / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int first_entry, tx_packets, enh_desc;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int gso = skb_shinfo(skb)->gso_type;
	struct dma_edesc *tbs_desc = NULL;
	int entry, desc_size, first_tx;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	dma_addr_t des;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
		stmmac_disable_eee_mode(priv);

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
			return stmmac_tso_xmit(skb, dev);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[entry].basic;
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				       priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_packets = (entry + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames)
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames)
		set_ic = true;
	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (likely(priv->extend_desc))
			desc = &tx_q->dma_etx[entry].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = &tx_q->dma_tx[entry];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				       csum_insertion, priv->mode, 0,
				       last_segment, skb->len);
	}

	if (tx_q->tbs & STMMAC_TBS_EN) {
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);

		tbs_desc = &tx_q->dma_entx[first_entry];
		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
	}

	stmmac_set_tx_owner(priv, first);

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth;
	__be16 vlan_proto;
	u16 vlanid;

	veth = (struct vlan_ethhdr *)skb->data;
	vlan_proto = veth->h_vlan_proto;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	    (vlan_proto == htons(ETH_P_8021AD) &&
	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}
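
/* stmmac_rx_refill() below re-arms consumed RX descriptors from the page
 * pool.  Note that the RX watchdog hint written into a refilled descriptor is
 * dropped entirely when RIWT is not in use, so interrupt mitigation ends up
 * following whichever of the frame counter or the RIWT timer fires first.
 */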
/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int len, dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;

	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (!buf->page) {
			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->page)
				break;
		}

		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);

			dma_sync_single_for_device(priv->device, buf->sec_addr,
						   len, DMA_FROM_DEVICE);
		}

		buf->addr = page_pool_get_dma_addr(buf->page);

		/* Sync whole allocation to device. This will invalidate old
		 * data.
		 */
		dma_sync_single_for_device(priv->device, buf->addr, len,
					   DMA_FROM_DEVICE);

		stmmac_set_desc_addr(priv, p, buf->addr);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames;
		if (rx_q->rx_count_frames > priv->rx_coal_frames)
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames;
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			     (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_buf_sz, plen);
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor */
	return plen - len;
}
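
/* stmmac_rx() below walks the ring one descriptor at a time: buf1 carries the
 * (split) header or the start of the payload and buf2, when split header is
 * enabled, carries the remainder.  Any state for a frame that spans the NAPI
 * budget boundary is parked in rx_q->state and resumed on the next poll.
 */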
/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description :  this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int count = 0, error = 0, len = 0;
	int status = 0, coe = priv->hw->rx_csum;
	unsigned int next_entry = rx_q->cur_rx;
	struct sk_buff *skb = NULL;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			rx_head = (void *)rx_q->dma_erx;
		else
			rx_head = (void *)rx_q->dma_rx;

		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
	}
	while (count < limit) {
		unsigned int buf1_len = 0, buf2_len = 0;
		enum pkt_hash_types hash_type;
		struct stmmac_rx_buffer *buf;
		struct dma_desc *np, *p;
		int entry;
		u32 hash;

		if (!count && rx_q->state_saved) {
			skb = rx_q->state.skb;
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			skb = NULL;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
		buf1_len = 0;
		buf2_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
					  &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
						  &priv->xstats, rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
			error = 1;
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
		}

		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			dev_kfree_skb(skb);
			skb = NULL;
			count++;
			continue;
		}

		/* Buffer is good. Go on. */

		prefetch(page_address(buf->page));
		if (buf->sec_page)
			prefetch(page_address(buf->sec_page));

		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;
		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
		len += buf2_len;

		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
		 * Type frames (LLC/LLC-SNAP)
		 *
		 * llc_snap is never checked in GMAC >= 4, so this ACS
		 * feature is always disabled and packets need to be
		 * stripped manually.
		 */
		if (likely(!(status & rx_not_ls)) &&
		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
		     unlikely(status != llc_snap))) {
			if (buf2_len)
				buf2_len -= ETH_FCS_LEN;
			else
				buf1_len -= ETH_FCS_LEN;

			len -= ETH_FCS_LEN;
		}

		if (!skb) {
			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
			if (!skb) {
				priv->dev->stats.rx_dropped++;
				count++;
				goto drain_data;
			}

			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, page_address(buf->page),
						buf1_len);
			skb_put(skb, buf1_len);

			/* Data payload copied into SKB, page ready for recycle */
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
		} else if (buf1_len) {
			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->page, 0, buf1_len,
					priv->dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->page);
			buf->page = NULL;
		}

		if (buf2_len) {
			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
						buf2_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->sec_page, 0, buf2_len,
					priv->dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->sec_page);
			buf->sec_page = NULL;
		}

drain_data:
		if (likely(status & rx_not_ls))
			goto read_again;
		if (!skb)
			continue;

		/* Got entire packet into SKB. Finish it. */

		stmmac_get_rx_hwtstamp(priv, p, np, skb);
		stmmac_rx_vlan(priv->dev, skb);
		skb->protocol = eth_type_trans(skb, priv->dev);

		if (unlikely(!coe))
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
			skb_set_hash(skb, hash, hash_type);

		skb_record_rx_queue(skb, queue);
		napi_gro_receive(&ch->rx_napi, skb);
		skb = NULL;

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += len;
		count++;
	}

	if (status & rx_not_ls || skb) {
		rx_q->state_saved = true;
		rx_q->state.skb = skb;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_rx_refill(priv, queue);

	priv->xstats.rx_pkt_n += count;

	return count;
}
static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}
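
/* The TX poll routine below asks stmmac_tx_clean() to reclaim up to a full
 * ring's worth of descriptors (priv->dma_tx_size) and then clamps the value
 * reported to NAPI to the caller's budget, so completion work is not
 * artificially throttled while the NAPI accounting contract is still
 * respected.
 */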
static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, tx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

	work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
	work_done = min(work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}
/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  @txqueue: the index of the hanging transmit queue
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_global_err(priv);
}

/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_set_filter(priv, priv->hw, dev);
}
/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;

	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	txfifosz /= priv->plat->tx_queues_to_use;

	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	dev->mtu = new_mtu;

	netdev_update_features(dev);

	return 0;
}
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	bool sph_en;
	u32 chan;

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

	return 0;
}
/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer (must be valid).
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;
	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
		int mtl_status;

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++) {
			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
								queue);
			if (mtl_status != -EINVAL)
				status |= mtl_status;

			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
						       rx_q->rx_tail_addr,
						       queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
/**
 * stmmac_ioctl - Entry point for the Ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}
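/* For reference: the SIOCSHWTSTAMP branch above is what user space reaches
 * when enabling hardware time stamping. A minimal sketch (assuming an
 * already-open AF_INET socket "fd", an interface named "eth0" and the usual
 * <linux/net_tstamp.h>, <linux/sockios.h> and <net/if.h> definitions):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */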
static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);

static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
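/* For reference, these offloads are reached through tc(8); assuming an
 * interface named "eth0", the mapping is roughly:
 *
 *	TC_SETUP_QDISC_CBS     <- tc qdisc replace dev eth0 ... cbs ...
 *	TC_SETUP_QDISC_TAPRIO  <- tc qdisc replace dev eth0 ... taprio ...
 *	TC_SETUP_QDISC_ETF     <- tc qdisc replace dev eth0 ... etf ...
 *	TC_SETUP_BLOCK         <- tc qdisc add dev eth0 clsact, then
 *	                          tc filter add dev eth0 ingress flower/u32 ...
 *
 * The exact qdisc parameters are platform dependent and outside the scope
 * of this file.
 */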
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's use always the Queue 0
		 * because if TSO/USO is supported then at least this
		 * one will be capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

	return ret;
}
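/* For reference, this callback runs when user space changes the station
 * address, e.g. "ip link set dev eth0 address 02:11:22:33:44:55" (interface
 * name assumed). eth_mac_addr() validates the address and updates dev_addr;
 * the new address is then programmed into MAC address register 0.
 */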
#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_rx_size, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_rx_size, 0, seq);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_tx_size, 1, seq);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_tx_size, 0, seq);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   priv->dma_cap.asp ? "Y" : "N");
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.addr64);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);
}
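/* With debugfs mounted in the usual location, the two entries created above
 * appear as (assuming an interface named "eth0"):
 *
 *	/sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	/sys/kernel/debug/stmmaceth/eth0/dma_cap
 *
 * and can simply be read with cat(1).
 */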
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}

static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
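/* For example, after "ip link add link eth0 name eth0.100 type vlan id 100"
 * (names assumed), stmmac_vlan_update() computes
 * bitrev32(~stmmac_vid_crc32_le(cpu_to_le16(100))) >> 28, i.e. a 4-bit index,
 * sets that bit in the hash and programs it (or the perfect-match VID when
 * the core has no VLAN hash support) via stmmac_update_vlan_hash().
 */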
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		return ret;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			return ret;
	}

	return 0;
}
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			return ret;
	}

	return stmmac_vlan_update(priv, is_double);
}
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
			service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain modes and to setup either enhanced or
 * normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	     (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}
static void stmmac_napi_add(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;
		spin_lock_init(&ch->lock);

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
				       NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_tx_napi_add(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx,
					  NAPI_POLL_WEIGHT);
		}
	}
}
static void stmmac_napi_del(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
	}
}
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;

	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_rx_size = rx_size;
	priv->dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
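/* For reference, the two helpers above back the ethtool channel and ring
 * callbacks, i.e. they are what user space reaches through commands such as
 * (interface name assumed):
 *
 *	ethtool -L eth0 rx 4 tx 4		-> stmmac_reinit_queues()
 *	ethtool -G eth0 rx 1024 tx 1024		-> stmmac_reinit_ringparam()
 *
 * The interface is released and reopened so the new queue/ring layout can be
 * allocated from scratch.
 */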
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (!IS_ERR_OR_NULL(res->mac))
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		return -ENOMEM;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);
	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph = true;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* The current IP register MAC_HW_Feature1[ADDR64] only define
	 * 32/40/64 bit width, but some SOC support others like i.MX8MP
	 * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
	 * So overwrite dma_cap.addr64 according to HW real design.
	 */
	if (priv->plat->addr64)
		priv->dma_cap.addr64 = priv->plat->addr64;

	if (priv->dma_cap.addr64) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.addr64));
		if (!ret) {
			dev_info(priv->device, "Using %d bits DMA width\n",
				 priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.addr64 = 32;
		}
	}
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);
	priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Vice versa, the driver will
	 * try to set the MDC clock dynamically according to the actual
	 * CSR clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			goto error_serdes_powerup;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	return ret;

error_serdes_powerup:
	unregister_netdev(ndev);
error_netdev_register:
	phylink_destroy(priv->phylink);
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
 * changes the link status, and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	phylink_mac_change(priv->phylink, false);

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		mutex_unlock(&priv->lock);
		rtnl_lock();
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_stop(priv->phylink);
		rtnl_unlock();
		mutex_lock(&priv->lock);

		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clock in case of PWM is off */
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
		clk_disable_unprepare(priv->plat->pclk);
		clk_disable_unprepare(priv->plat->stmmac_clk);
	}
	mutex_unlock(&priv->lock);

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
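/* Whether the suspend path above keeps the MAC powered and programs the PMT
 * registers, or tears everything down, depends on device_may_wakeup() and on
 * the Wake-on-LAN options previously selected from user space, e.g.
 * "ethtool -s eth0 wol g" (interface name assumed), which ends up in
 * priv->wolopts and is what stmmac_pmt() consumes.
 */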
/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;
	}
}
/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clk previously disabled */
		clk_prepare_enable(priv->plat->stmmac_clk);
		clk_prepare_enable(priv->plat->pclk);
		if (priv->plat->clk_ptp_ref)
			clk_prepare_enable(priv->plat->clk_ptp_ref);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
		rtnl_lock();
		phylink_start(priv->phylink);
		/* We may have called phylink_speed_down before */
		phylink_speed_up(priv->phylink);
		rtnl_unlock();
	}

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	phylink_mac_change(priv->phylink, true);

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */
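/* When the driver is built in, the options parsed above come from the kernel
 * command line, for example:
 *
 *	stmmaceth=debug:16,eee_timer:500,chain_mode:1
 *
 * When built as a module, the same knobs are instead exposed as module
 * parameters (e.g. "modprobe stmmac debug=16") and this early hook is not
 * used.
 */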
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");