drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 /*******************************************************************************
2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3 ST Ethernet IPs are built around a Synopsys IP Core.
5 Copyright(C) 2007-2011 STMicroelectronics Ltd
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
16 The full GNU General Public License is included in this distribution in
17 the file called "COPYING".
19 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21 Documentation available at:
22 http://www.stlinux.com
23 Support available at:
24 https://bugzilla.stlinux.com/
25 *******************************************************************************/
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
57 /* Module parameters */
58 #define TX_TIMEO 5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
71 #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
92 #define STMMAC_RX_COPYBREAK 256
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
98 #define STMMAC_DEFAULT_LPI_TIMER 1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but it allows the user to force the use of the chain instead of the ring.
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
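/* Usage sketch (illustrative, not part of the driver): the module parameters
 * above can typically be set at load time, e.g.
 *   modprobe stmmac watchdog=10000 buf_sz=4096 chain_mode=1
 * or, when the driver is built in, on the kernel command line as
 *   stmmac.eee_timer=2000
 * Parameters declared with S_IWUSR can also be changed at run time through
 * /sys/module/stmmac/parameters/.
 */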
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
121 * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in case of
123 * errors.
125 static void stmmac_verify_args(void)
127 if (unlikely(watchdog < 0))
128 watchdog = TX_TIMEO;
129 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 buf_sz = DEFAULT_BUFSIZE;
131 if (unlikely(flow_ctrl > 1))
132 flow_ctrl = FLOW_AUTO;
133 else if (likely(flow_ctrl < 0))
134 flow_ctrl = FLOW_OFF;
135 if (unlikely((pause < 0) || (pause > 0xffff)))
136 pause = PAUSE_TIME;
137 if (eee_timer < 0)
138 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 * stmmac_disable_all_queues - Disable all queues
143 * @priv: driver private structure
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
147 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148 u32 queue;
150 for (queue = 0; queue < rx_queues_cnt; queue++) {
151 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
153 napi_disable(&rx_q->napi);
158 * stmmac_enable_all_queues - Enable all queues
159 * @priv: driver private structure
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
163 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 u32 queue;
166 for (queue = 0; queue < rx_queues_cnt; queue++) {
167 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
169 napi_enable(&rx_q->napi);
174 * stmmac_stop_all_queues - Stop all queues
175 * @priv: driver private structure
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
179 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180 u32 queue;
182 for (queue = 0; queue < tx_queues_cnt; queue++)
183 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
187 * stmmac_start_all_queues - Start all queues
188 * @priv: driver private structure
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
192 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 u32 queue;
195 for (queue = 0; queue < tx_queues_cnt; queue++)
196 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
200 * stmmac_clk_csr_set - dynamically set the MDC clock
201 * @priv: driver private structure
202 * Description: this is to dynamically set the MDC clock according to the csr
203 * clock input.
204 * Note:
205 * If a specific clk_csr value is passed from the platform
206 * this means that the CSR Clock Range selection cannot be
207 * changed at run-time and it is fixed (as reported in the driver
208  * documentation). Otherwise, the driver will try to set the MDC
209 * clock dynamically according to the actual clock input.
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
213 u32 clk_rate;
215 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
217 /* Platform provided default clk_csr would be assumed valid
218 * for all other cases except for the below mentioned ones.
219 * For values higher than the IEEE 802.3 specified frequency
220  * we cannot estimate the proper divider because the frequency
221  * of clk_csr_i is not known. So we do not change the default
222 * divider.
224 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225 if (clk_rate < CSR_F_35M)
226 priv->clk_csr = STMMAC_CSR_20_35M;
227 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228 priv->clk_csr = STMMAC_CSR_35_60M;
229 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230 priv->clk_csr = STMMAC_CSR_60_100M;
231 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232 priv->clk_csr = STMMAC_CSR_100_150M;
233 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234 priv->clk_csr = STMMAC_CSR_150_250M;
235 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236 priv->clk_csr = STMMAC_CSR_250_300M;
239 if (priv->plat->has_sun8i) {
240 if (clk_rate > 160000000)
241 priv->clk_csr = 0x03;
242 else if (clk_rate > 80000000)
243 priv->clk_csr = 0x02;
244 else if (clk_rate > 40000000)
245 priv->clk_csr = 0x01;
246 else
247 priv->clk_csr = 0;
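/* Worked example (illustrative): with a 50 MHz CSR clock and no fixed clk_csr
 * passed from the platform, the range checks above select STMMAC_CSR_35_60M;
 * on a sun8i platform the same 50 MHz rate maps to the vendor-specific value
 * 0x01 (the "> 40 MHz" bucket).
 */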
251 static void print_pkt(unsigned char *buf, int len)
253 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
254 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
257 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
259 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
260 u32 avail;
262 if (tx_q->dirty_tx > tx_q->cur_tx)
263 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
264 else
265 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
267 return avail;
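/* Worked example (illustrative): with DMA_TX_SIZE = 512, cur_tx = 10 and
 * dirty_tx = 5, avail = 512 - 10 + 5 - 1 = 506; if instead dirty_tx = 500 and
 * cur_tx = 10 (cur_tx has wrapped around), avail = 500 - 10 - 1 = 489.
 * One slot is always kept unused so that a full ring can be told apart from
 * an empty one.
 */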
271 * stmmac_rx_dirty - Get RX queue dirty
272 * @priv: driver private structure
273 * @queue: RX queue index
275 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
277 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
278 u32 dirty;
280 if (rx_q->dirty_rx <= rx_q->cur_rx)
281 dirty = rx_q->cur_rx - rx_q->dirty_rx;
282 else
283 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
285 return dirty;
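/* Worked example (illustrative): with cur_rx = 20 and dirty_rx = 15 there are
 * 20 - 15 = 5 dirty descriptors waiting to be refilled; if cur_rx has wrapped
 * (e.g. dirty_rx = 510, cur_rx = 4 with DMA_RX_SIZE = 512), then
 * dirty = 512 - 510 + 4 = 6.
 */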
289 * stmmac_hw_fix_mac_speed - callback for speed selection
290 * @priv: driver private structure
291 * Description: on some platforms (e.g. ST), some HW system configuration
292 * registers have to be set according to the link speed negotiated.
294 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
296 struct net_device *ndev = priv->dev;
297 struct phy_device *phydev = ndev->phydev;
299 if (likely(priv->plat->fix_mac_speed))
300 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
304 * stmmac_enable_eee_mode - check and enter in LPI mode
305 * @priv: driver private structure
306 * Description: this function is to verify and enter in LPI mode in case of
307 * EEE.
309 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
311 u32 tx_cnt = priv->plat->tx_queues_to_use;
312 u32 queue;
314 /* check if all TX queues have the work finished */
315 for (queue = 0; queue < tx_cnt; queue++) {
316 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
318 if (tx_q->dirty_tx != tx_q->cur_tx)
319 return; /* still unfinished work */
322 /* Check and enter in LPI mode */
323 if (!priv->tx_path_in_lpi_mode)
324 priv->hw->mac->set_eee_mode(priv->hw,
325 priv->plat->en_tx_lpi_clockgating);
329 * stmmac_disable_eee_mode - disable and exit from LPI mode
330 * @priv: driver private structure
331 * Description: this function is to exit and disable EEE in case of
332 * LPI state is true. This is called by the xmit.
334 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
336 priv->hw->mac->reset_eee_mode(priv->hw);
337 del_timer_sync(&priv->eee_ctrl_timer);
338 priv->tx_path_in_lpi_mode = false;
342 * stmmac_eee_ctrl_timer - EEE TX SW timer.
343 * @arg : data hook
344 * Description:
345 * if there is no data transfer and if we are not in LPI state,
346 * then MAC Transmitter can be moved to LPI state.
348 static void stmmac_eee_ctrl_timer(struct timer_list *t)
350 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
352 stmmac_enable_eee_mode(priv);
353 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
357 * stmmac_eee_init - init EEE
358 * @priv: driver private structure
359 * Description:
360 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
361  * can also manage EEE, this function enables the LPI state and starts the
362  * related timer.
364 bool stmmac_eee_init(struct stmmac_priv *priv)
366 struct net_device *ndev = priv->dev;
367 int interface = priv->plat->interface;
368 unsigned long flags;
369 bool ret = false;
371 if ((interface != PHY_INTERFACE_MODE_MII) &&
372 (interface != PHY_INTERFACE_MODE_GMII) &&
373 !phy_interface_mode_is_rgmii(interface))
374 goto out;
376  /* Using PCS we cannot deal with the phy registers at this stage,
377  * so we do not support extra features like EEE.
378  */
379 if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
380 (priv->hw->pcs == STMMAC_PCS_TBI) ||
381 (priv->hw->pcs == STMMAC_PCS_RTBI))
382 goto out;
384 /* MAC core supports the EEE feature. */
385 if (priv->dma_cap.eee) {
386 int tx_lpi_timer = priv->tx_lpi_timer;
388 /* Check if the PHY supports EEE */
389 if (phy_init_eee(ndev->phydev, 1)) {
390  /* To manage the case where, at run-time, EEE can no longer be
391  * supported (for example because the link partner caps have
392  * changed).
393  * In that case the driver disables its own timers.
394  */
395 spin_lock_irqsave(&priv->lock, flags);
396 if (priv->eee_active) {
397 netdev_dbg(priv->dev, "disable EEE\n");
398 del_timer_sync(&priv->eee_ctrl_timer);
399 priv->hw->mac->set_eee_timer(priv->hw, 0,
400 tx_lpi_timer);
402 priv->eee_active = 0;
403 spin_unlock_irqrestore(&priv->lock, flags);
404 goto out;
406 /* Activate the EEE and start timers */
407 spin_lock_irqsave(&priv->lock, flags);
408 if (!priv->eee_active) {
409 priv->eee_active = 1;
410 timer_setup(&priv->eee_ctrl_timer,
411 stmmac_eee_ctrl_timer, 0);
412 mod_timer(&priv->eee_ctrl_timer,
413 STMMAC_LPI_T(eee_timer));
415 priv->hw->mac->set_eee_timer(priv->hw,
416 STMMAC_DEFAULT_LIT_LS,
417 tx_lpi_timer);
419 /* Set HW EEE according to the speed */
420 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
422 ret = true;
423 spin_unlock_irqrestore(&priv->lock, flags);
425 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
427 out:
428 return ret;
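/* Overview (illustrative summary of the flow above): once EEE is active,
 * stmmac_tx_clean() re-arms eee_ctrl_timer whenever the TX queues drain;
 * when the timer fires, stmmac_enable_eee_mode() puts the MAC into LPI, and
 * stmmac_disable_eee_mode() is called from the xmit path to leave LPI before
 * new frames are queued.
 */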
431 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
432 * @priv: driver private structure
433 * @p : descriptor pointer
434 * @skb : the socket buffer
435 * Description :
436  * This function will read the timestamp from the descriptor and pass it
437  * to the stack. It also performs some sanity checks.
438  */
439 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
440 struct dma_desc *p, struct sk_buff *skb)
442 struct skb_shared_hwtstamps shhwtstamp;
443 u64 ns;
445 if (!priv->hwts_tx_en)
446 return;
448 /* exit if skb doesn't support hw tstamp */
449 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
450 return;
452 /* check tx tstamp status */
453 if (priv->hw->desc->get_tx_timestamp_status(p)) {
454 /* get the valid tstamp */
455 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
457 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
458 shhwtstamp.hwtstamp = ns_to_ktime(ns);
460 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
461 /* pass tstamp to stack */
462 skb_tstamp_tx(skb, &shhwtstamp);
465 return;
468 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
469 * @priv: driver private structure
470 * @p : descriptor pointer
471 * @np : next descriptor pointer
472 * @skb : the socket buffer
473 * Description :
474  * This function will read the received packet's timestamp from the
475  * descriptor and pass it to the stack. It also performs some sanity checks.
476  */
477 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
478 struct dma_desc *np, struct sk_buff *skb)
480 struct skb_shared_hwtstamps *shhwtstamp = NULL;
481 struct dma_desc *desc = p;
482 u64 ns;
484 if (!priv->hwts_rx_en)
485 return;
486 /* For GMAC4, the valid timestamp is from CTX next desc. */
487 if (priv->plat->has_gmac4)
488 desc = np;
490 /* Check if timestamp is available */
491 if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
492 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
493 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
494 shhwtstamp = skb_hwtstamps(skb);
495 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
496 shhwtstamp->hwtstamp = ns_to_ktime(ns);
497 } else {
498 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
503 * stmmac_hwtstamp_ioctl - control hardware timestamping.
504 * @dev: device pointer.
505 * @ifr: An IOCTL specific structure, that can contain a pointer to
506 * a proprietary structure used to pass information to the driver.
507 * Description:
508 * This function configures the MAC to enable/disable both outgoing(TX)
509 * and incoming(RX) packets time stamping based on user input.
510 * Return Value:
511 * 0 on success and an appropriate -ve integer on failure.
513 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
515 struct stmmac_priv *priv = netdev_priv(dev);
516 struct hwtstamp_config config;
517 struct timespec64 now;
518 u64 temp = 0;
519 u32 ptp_v2 = 0;
520 u32 tstamp_all = 0;
521 u32 ptp_over_ipv4_udp = 0;
522 u32 ptp_over_ipv6_udp = 0;
523 u32 ptp_over_ethernet = 0;
524 u32 snap_type_sel = 0;
525 u32 ts_master_en = 0;
526 u32 ts_event_en = 0;
527 u32 value = 0;
528 u32 sec_inc;
530 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
531 netdev_alert(priv->dev, "No support for HW time stamping\n");
532 priv->hwts_tx_en = 0;
533 priv->hwts_rx_en = 0;
535 return -EOPNOTSUPP;
538 if (copy_from_user(&config, ifr->ifr_data,
539 sizeof(struct hwtstamp_config)))
540 return -EFAULT;
542 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
543 __func__, config.flags, config.tx_type, config.rx_filter);
545 /* reserved for future extensions */
546 if (config.flags)
547 return -EINVAL;
549 if (config.tx_type != HWTSTAMP_TX_OFF &&
550 config.tx_type != HWTSTAMP_TX_ON)
551 return -ERANGE;
553 if (priv->adv_ts) {
554 switch (config.rx_filter) {
555 case HWTSTAMP_FILTER_NONE:
556 /* time stamp no incoming packet at all */
557 config.rx_filter = HWTSTAMP_FILTER_NONE;
558 break;
560 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
561 /* PTP v1, UDP, any kind of event packet */
562 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
563 /* take time stamp for all event messages */
564 if (priv->plat->has_gmac4)
565 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
566 else
567 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
569 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
570 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
571 break;
573 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
574 /* PTP v1, UDP, Sync packet */
575 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
576 /* take time stamp for SYNC messages only */
577 ts_event_en = PTP_TCR_TSEVNTENA;
579 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
580 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
581 break;
583 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
584 /* PTP v1, UDP, Delay_req packet */
585 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
586 /* take time stamp for Delay_Req messages only */
587 ts_master_en = PTP_TCR_TSMSTRENA;
588 ts_event_en = PTP_TCR_TSEVNTENA;
590 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
592 break;
594 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
595 /* PTP v2, UDP, any kind of event packet */
596 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
597 ptp_v2 = PTP_TCR_TSVER2ENA;
598 /* take time stamp for all event messages */
599 if (priv->plat->has_gmac4)
600 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
601 else
602 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
604 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606 break;
608 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
609 /* PTP v2, UDP, Sync packet */
610 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
611 ptp_v2 = PTP_TCR_TSVER2ENA;
612 /* take time stamp for SYNC messages only */
613 ts_event_en = PTP_TCR_TSEVNTENA;
615 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
616 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
617 break;
619 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
620 /* PTP v2, UDP, Delay_req packet */
621 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
622 ptp_v2 = PTP_TCR_TSVER2ENA;
623 /* take time stamp for Delay_Req messages only */
624 ts_master_en = PTP_TCR_TSMSTRENA;
625 ts_event_en = PTP_TCR_TSEVNTENA;
627 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
628 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
629 break;
631 case HWTSTAMP_FILTER_PTP_V2_EVENT:
632 /* PTP v2/802.AS1 any layer, any kind of event packet */
633 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
634 ptp_v2 = PTP_TCR_TSVER2ENA;
635 /* take time stamp for all event messages */
636 if (priv->plat->has_gmac4)
637 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
638 else
639 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
641 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 ptp_over_ethernet = PTP_TCR_TSIPENA;
644 break;
646 case HWTSTAMP_FILTER_PTP_V2_SYNC:
647 /* PTP v2/802.AS1, any layer, Sync packet */
648 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
649 ptp_v2 = PTP_TCR_TSVER2ENA;
650 /* take time stamp for SYNC messages only */
651 ts_event_en = PTP_TCR_TSEVNTENA;
653 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
654 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
655 ptp_over_ethernet = PTP_TCR_TSIPENA;
656 break;
658 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
659 /* PTP v2/802.AS1, any layer, Delay_req packet */
660 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
661 ptp_v2 = PTP_TCR_TSVER2ENA;
662 /* take time stamp for Delay_Req messages only */
663 ts_master_en = PTP_TCR_TSMSTRENA;
664 ts_event_en = PTP_TCR_TSEVNTENA;
666 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
667 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
668 ptp_over_ethernet = PTP_TCR_TSIPENA;
669 break;
671 case HWTSTAMP_FILTER_NTP_ALL:
672 case HWTSTAMP_FILTER_ALL:
673 /* time stamp any incoming packet */
674 config.rx_filter = HWTSTAMP_FILTER_ALL;
675 tstamp_all = PTP_TCR_TSENALL;
676 break;
678 default:
679 return -ERANGE;
681 } else {
682 switch (config.rx_filter) {
683 case HWTSTAMP_FILTER_NONE:
684 config.rx_filter = HWTSTAMP_FILTER_NONE;
685 break;
686 default:
687 /* PTP v1, UDP, any kind of event packet */
688 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
689 break;
692 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
693 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
695 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
696 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
697 else {
698 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
699 tstamp_all | ptp_v2 | ptp_over_ethernet |
700 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
701 ts_master_en | snap_type_sel);
702 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
704 /* program Sub Second Increment reg */
705 sec_inc = priv->hw->ptp->config_sub_second_increment(
706 priv->ptpaddr, priv->plat->clk_ptp_rate,
707 priv->plat->has_gmac4);
708 temp = div_u64(1000000000ULL, sec_inc);
710  /* calculate the default addend value:
711  * formula is:
712  * addend = (2^32)/freq_div_ratio;
713  * where freq_div_ratio = 1e9ns/sec_inc
714  */
715 temp = (u64)(temp << 32);
716 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
717 priv->hw->ptp->config_addend(priv->ptpaddr,
718 priv->default_addend);
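/* Numeric example (illustrative, made-up rates): if sec_inc resolves to 20 ns,
 * temp = 1e9 / 20 = 50,000,000; with clk_ptp_rate = 62.5 MHz the programmed
 * addend is (50,000,000 << 32) / 62,500,000 = 0.8 * 2^32 ~= 3,435,973,837.
 */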
720 /* initialize system time */
721 ktime_get_real_ts64(&now);
723 /* lower 32 bits of tv_sec are safe until y2106 */
724 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
725 now.tv_nsec);
728 return copy_to_user(ifr->ifr_data, &config,
729 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
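/* User-space sketch (illustrative, not part of the driver): this handler is
 * reached through the SIOCSHWTSTAMP ioctl on the interface, roughly:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type   = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * The interface name "eth0" and sock_fd (any open AF_INET socket) are
 * assumptions; the structures come from <linux/net_tstamp.h> and <net/if.h>.
 */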
733 * stmmac_init_ptp - init PTP
734 * @priv: driver private structure
735 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
736 * This is done by looking at the HW cap. register.
737 * This function also registers the ptp driver.
739 static int stmmac_init_ptp(struct stmmac_priv *priv)
741 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
742 return -EOPNOTSUPP;
744 priv->adv_ts = 0;
745 /* Check if adv_ts can be enabled for dwmac 4.x core */
746 if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
747 priv->adv_ts = 1;
748 /* Dwmac 3.x core with extend_desc can support adv_ts */
749 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
750 priv->adv_ts = 1;
752 if (priv->dma_cap.time_stamp)
753 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
755 if (priv->adv_ts)
756 netdev_info(priv->dev,
757 "IEEE 1588-2008 Advanced Timestamp supported\n");
759 priv->hw->ptp = &stmmac_ptp;
760 priv->hwts_tx_en = 0;
761 priv->hwts_rx_en = 0;
763 stmmac_ptp_register(priv);
765 return 0;
768 static void stmmac_release_ptp(struct stmmac_priv *priv)
770 if (priv->plat->clk_ptp_ref)
771 clk_disable_unprepare(priv->plat->clk_ptp_ref);
772 stmmac_ptp_unregister(priv);
776 * stmmac_mac_flow_ctrl - Configure flow control in all queues
777 * @priv: driver private structure
778 * Description: It is used for configuring the flow control in all queues
780 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
782 u32 tx_cnt = priv->plat->tx_queues_to_use;
784 priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
785 priv->pause, tx_cnt);
789 * stmmac_adjust_link - adjusts the link parameters
790 * @dev: net device structure
791 * Description: this is the helper called by the physical abstraction layer
792  * drivers to communicate the phy link status. According to the speed and
793  * duplex, this driver can invoke the registered glue-logic as well.
794  * It also invokes the EEE initialization because it may be needed when
795  * switching between different networks (that are EEE capable).
797 static void stmmac_adjust_link(struct net_device *dev)
799 struct stmmac_priv *priv = netdev_priv(dev);
800 struct phy_device *phydev = dev->phydev;
801 unsigned long flags;
802 bool new_state = false;
804 if (!phydev)
805 return;
807 spin_lock_irqsave(&priv->lock, flags);
809 if (phydev->link) {
810 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
812 /* Now we make sure that we can be in full duplex mode.
813 * If not, we operate in half-duplex mode. */
814 if (phydev->duplex != priv->oldduplex) {
815 new_state = true;
816 if (!phydev->duplex)
817 ctrl &= ~priv->hw->link.duplex;
818 else
819 ctrl |= priv->hw->link.duplex;
820 priv->oldduplex = phydev->duplex;
822 /* Flow Control operation */
823 if (phydev->pause)
824 stmmac_mac_flow_ctrl(priv, phydev->duplex);
826 if (phydev->speed != priv->speed) {
827 new_state = true;
828 ctrl &= ~priv->hw->link.speed_mask;
829 switch (phydev->speed) {
830 case SPEED_1000:
831 ctrl |= priv->hw->link.speed1000;
832 break;
833 case SPEED_100:
834 ctrl |= priv->hw->link.speed100;
835 break;
836 case SPEED_10:
837 ctrl |= priv->hw->link.speed10;
838 break;
839 default:
840 netif_warn(priv, link, priv->dev,
841 "broken speed: %d\n", phydev->speed);
842 phydev->speed = SPEED_UNKNOWN;
843 break;
845 if (phydev->speed != SPEED_UNKNOWN)
846 stmmac_hw_fix_mac_speed(priv);
847 priv->speed = phydev->speed;
850 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
852 if (!priv->oldlink) {
853 new_state = true;
854 priv->oldlink = true;
856 } else if (priv->oldlink) {
857 new_state = true;
858 priv->oldlink = false;
859 priv->speed = SPEED_UNKNOWN;
860 priv->oldduplex = DUPLEX_UNKNOWN;
863 if (new_state && netif_msg_link(priv))
864 phy_print_status(phydev);
866 spin_unlock_irqrestore(&priv->lock, flags);
868 if (phydev->is_pseudo_fixed_link)
869  /* Stop the PHY layer from calling the hook to adjust the link
870  * in case a switch is attached to the stmmac driver.
871  */
872 phydev->irq = PHY_IGNORE_INTERRUPT;
873 else
874 /* At this stage, init the EEE if supported.
875 * Never called in case of fixed_link.
877 priv->eee_enabled = stmmac_eee_init(priv);
881 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
882 * @priv: driver private structure
883  * Description: this is to verify if the HW supports the Physical Coding
884  * Sublayer (PCS) interface that can be used when the MAC is
885 * configured for the TBI, RTBI, or SGMII PHY interface.
887 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
889 int interface = priv->plat->interface;
891 if (priv->dma_cap.pcs) {
892 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
893 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
894 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
895 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
896 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
897 priv->hw->pcs = STMMAC_PCS_RGMII;
898 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
899 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
900 priv->hw->pcs = STMMAC_PCS_SGMII;
906 * stmmac_init_phy - PHY initialization
907 * @dev: net device structure
908 * Description: it initializes the driver's PHY state, and attaches the PHY
909 * to the mac driver.
910 * Return value:
911 * 0 on success
913 static int stmmac_init_phy(struct net_device *dev)
915 struct stmmac_priv *priv = netdev_priv(dev);
916 struct phy_device *phydev;
917 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
918 char bus_id[MII_BUS_ID_SIZE];
919 int interface = priv->plat->interface;
920 int max_speed = priv->plat->max_speed;
921 priv->oldlink = false;
922 priv->speed = SPEED_UNKNOWN;
923 priv->oldduplex = DUPLEX_UNKNOWN;
925 if (priv->plat->phy_node) {
926 phydev = of_phy_connect(dev, priv->plat->phy_node,
927 &stmmac_adjust_link, 0, interface);
928 } else {
929 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
930 priv->plat->bus_id);
932 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
933 priv->plat->phy_addr);
934 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
935 phy_id_fmt);
937 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
938 interface);
941 if (IS_ERR_OR_NULL(phydev)) {
942 netdev_err(priv->dev, "Could not attach to PHY\n");
943 if (!phydev)
944 return -ENODEV;
946 return PTR_ERR(phydev);
949 /* Stop Advertising 1000BASE Capability if interface is not GMII */
950 if ((interface == PHY_INTERFACE_MODE_MII) ||
951 (interface == PHY_INTERFACE_MODE_RMII) ||
952 (max_speed < 1000 && max_speed > 0))
953 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
954 SUPPORTED_1000baseT_Full);
957 * Broken HW is sometimes missing the pull-up resistor on the
958 * MDIO line, which results in reads to non-existent devices returning
959 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
960 * device as well.
961 * Note: phydev->phy_id is the result of reading the UID PHY registers.
963 if (!priv->plat->phy_node && phydev->phy_id == 0) {
964 phy_disconnect(phydev);
965 return -ENODEV;
968 /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
969 * subsequent PHY polling, make sure we force a link transition if
970 * we have a UP/DOWN/UP transition
972 if (phydev->is_pseudo_fixed_link)
973 phydev->irq = PHY_POLL;
975 phy_attached_info(phydev);
976 return 0;
979 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
981 u32 rx_cnt = priv->plat->rx_queues_to_use;
982 void *head_rx;
983 u32 queue;
985 /* Display RX rings */
986 for (queue = 0; queue < rx_cnt; queue++) {
987 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
989 pr_info("\tRX Queue %u rings\n", queue);
991 if (priv->extend_desc)
992 head_rx = (void *)rx_q->dma_erx;
993 else
994 head_rx = (void *)rx_q->dma_rx;
996 /* Display RX ring */
997 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
1001 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1003 u32 tx_cnt = priv->plat->tx_queues_to_use;
1004 void *head_tx;
1005 u32 queue;
1007 /* Display TX rings */
1008 for (queue = 0; queue < tx_cnt; queue++) {
1009 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1011 pr_info("\tTX Queue %d rings\n", queue);
1013 if (priv->extend_desc)
1014 head_tx = (void *)tx_q->dma_etx;
1015 else
1016 head_tx = (void *)tx_q->dma_tx;
1018 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1022 static void stmmac_display_rings(struct stmmac_priv *priv)
1024 /* Display RX ring */
1025 stmmac_display_rx_rings(priv);
1027 /* Display TX ring */
1028 stmmac_display_tx_rings(priv);
1031 static int stmmac_set_bfsize(int mtu, int bufsize)
1033 int ret = bufsize;
1035 if (mtu >= BUF_SIZE_4KiB)
1036 ret = BUF_SIZE_8KiB;
1037 else if (mtu >= BUF_SIZE_2KiB)
1038 ret = BUF_SIZE_4KiB;
1039 else if (mtu > DEFAULT_BUFSIZE)
1040 ret = BUF_SIZE_2KiB;
1041 else
1042 ret = DEFAULT_BUFSIZE;
1044 return ret;
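/* Worked example (illustrative): an MTU of 1500 keeps DEFAULT_BUFSIZE
 * (1536 bytes), 1600 selects BUF_SIZE_2KiB, 3000 selects BUF_SIZE_4KiB, and
 * anything of 4096 or more selects BUF_SIZE_8KiB.
 */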
1048 * stmmac_clear_rx_descriptors - clear RX descriptors
1049 * @priv: driver private structure
1050 * @queue: RX queue index
1051 * Description: this function is called to clear the RX descriptors
1052  * whether basic or extended descriptors are in use.
1054 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1056 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1057 int i;
1059 /* Clear the RX descriptors */
1060 for (i = 0; i < DMA_RX_SIZE; i++)
1061 if (priv->extend_desc)
1062 priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1063 priv->use_riwt, priv->mode,
1064 (i == DMA_RX_SIZE - 1));
1065 else
1066 priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1067 priv->use_riwt, priv->mode,
1068 (i == DMA_RX_SIZE - 1));
1072 * stmmac_clear_tx_descriptors - clear tx descriptors
1073 * @priv: driver private structure
1074 * @queue: TX queue index.
1075 * Description: this function is called to clear the TX descriptors
1076  * whether basic or extended descriptors are in use.
1078 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1080 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1081 int i;
1083 /* Clear the TX descriptors */
1084 for (i = 0; i < DMA_TX_SIZE; i++)
1085 if (priv->extend_desc)
1086 priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1087 priv->mode,
1088 (i == DMA_TX_SIZE - 1));
1089 else
1090 priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1091 priv->mode,
1092 (i == DMA_TX_SIZE - 1));
1096 * stmmac_clear_descriptors - clear descriptors
1097 * @priv: driver private structure
1098 * Description: this function is called to clear the TX and RX descriptors
1099  * whether basic or extended descriptors are in use.
1101 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1103 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1104 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1105 u32 queue;
1107 /* Clear the RX descriptors */
1108 for (queue = 0; queue < rx_queue_cnt; queue++)
1109 stmmac_clear_rx_descriptors(priv, queue);
1111 /* Clear the TX descriptors */
1112 for (queue = 0; queue < tx_queue_cnt; queue++)
1113 stmmac_clear_tx_descriptors(priv, queue);
1117 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1118 * @priv: driver private structure
1119 * @p: descriptor pointer
1120 * @i: descriptor index
1121 * @flags: gfp flag
1122 * @queue: RX queue index
1123 * Description: this function is called to allocate a receive buffer, perform
1124 * the DMA mapping and init the descriptor.
1126 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1127 int i, gfp_t flags, u32 queue)
1129 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1130 struct sk_buff *skb;
1132 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1133 if (!skb) {
1134 netdev_err(priv->dev,
1135 "%s: Rx init fails; skb is NULL\n", __func__);
1136 return -ENOMEM;
1138 rx_q->rx_skbuff[i] = skb;
1139 rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1140 priv->dma_buf_sz,
1141 DMA_FROM_DEVICE);
1142 if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1143 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1144 dev_kfree_skb_any(skb);
1145 return -EINVAL;
1148 if (priv->synopsys_id >= DWMAC_CORE_4_00)
1149 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1150 else
1151 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1153 if ((priv->hw->mode->init_desc3) &&
1154 (priv->dma_buf_sz == BUF_SIZE_16KiB))
1155 priv->hw->mode->init_desc3(p);
1157 return 0;
1161 * stmmac_free_rx_buffer - free RX dma buffers
1162 * @priv: private structure
1163 * @queue: RX queue index
1164 * @i: buffer index.
1166 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1168 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1170 if (rx_q->rx_skbuff[i]) {
1171 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1172 priv->dma_buf_sz, DMA_FROM_DEVICE);
1173 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1175 rx_q->rx_skbuff[i] = NULL;
1179  * stmmac_free_tx_buffer - free TX dma buffers
1180  * @priv: private structure
1181  * @queue: TX queue index
1182 * @i: buffer index.
1184 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1186 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1188 if (tx_q->tx_skbuff_dma[i].buf) {
1189 if (tx_q->tx_skbuff_dma[i].map_as_page)
1190 dma_unmap_page(priv->device,
1191 tx_q->tx_skbuff_dma[i].buf,
1192 tx_q->tx_skbuff_dma[i].len,
1193 DMA_TO_DEVICE);
1194 else
1195 dma_unmap_single(priv->device,
1196 tx_q->tx_skbuff_dma[i].buf,
1197 tx_q->tx_skbuff_dma[i].len,
1198 DMA_TO_DEVICE);
1201 if (tx_q->tx_skbuff[i]) {
1202 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1203 tx_q->tx_skbuff[i] = NULL;
1204 tx_q->tx_skbuff_dma[i].buf = 0;
1205 tx_q->tx_skbuff_dma[i].map_as_page = false;
1210 * init_dma_rx_desc_rings - init the RX descriptor rings
1211 * @dev: net device structure
1212 * @flags: gfp flag.
1213 * Description: this function initializes the DMA RX descriptors
1214 * and allocates the socket buffers. It supports the chained and ring
1215 * modes.
1217 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1219 struct stmmac_priv *priv = netdev_priv(dev);
1220 u32 rx_count = priv->plat->rx_queues_to_use;
1221 unsigned int bfsize = 0;
1222 int ret = -ENOMEM;
1223 int queue;
1224 int i;
1226 if (priv->hw->mode->set_16kib_bfsize)
1227 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1229 if (bfsize < BUF_SIZE_16KiB)
1230 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1232 priv->dma_buf_sz = bfsize;
1234 /* RX INITIALIZATION */
1235 netif_dbg(priv, probe, priv->dev,
1236 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1238 for (queue = 0; queue < rx_count; queue++) {
1239 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1241 netif_dbg(priv, probe, priv->dev,
1242 "(%s) dma_rx_phy=0x%08x\n", __func__,
1243 (u32)rx_q->dma_rx_phy);
1245 for (i = 0; i < DMA_RX_SIZE; i++) {
1246 struct dma_desc *p;
1248 if (priv->extend_desc)
1249 p = &((rx_q->dma_erx + i)->basic);
1250 else
1251 p = rx_q->dma_rx + i;
1253 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1254 queue);
1255 if (ret)
1256 goto err_init_rx_buffers;
1258 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1259 rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1260 (unsigned int)rx_q->rx_skbuff_dma[i]);
1263 rx_q->cur_rx = 0;
1264 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1266 stmmac_clear_rx_descriptors(priv, queue);
1268 /* Setup the chained descriptor addresses */
1269 if (priv->mode == STMMAC_CHAIN_MODE) {
1270 if (priv->extend_desc)
1271 priv->hw->mode->init(rx_q->dma_erx,
1272 rx_q->dma_rx_phy,
1273 DMA_RX_SIZE, 1);
1274 else
1275 priv->hw->mode->init(rx_q->dma_rx,
1276 rx_q->dma_rx_phy,
1277 DMA_RX_SIZE, 0);
1281 buf_sz = bfsize;
1283 return 0;
1285 err_init_rx_buffers:
1286 while (queue >= 0) {
1287 while (--i >= 0)
1288 stmmac_free_rx_buffer(priv, queue, i);
1290 if (queue == 0)
1291 break;
1293 i = DMA_RX_SIZE;
1294 queue--;
1297 return ret;
1301 * init_dma_tx_desc_rings - init the TX descriptor rings
1302 * @dev: net device structure.
1303 * Description: this function initializes the DMA TX descriptors
1304 * and allocates the socket buffers. It supports the chained and ring
1305 * modes.
1307 static int init_dma_tx_desc_rings(struct net_device *dev)
1309 struct stmmac_priv *priv = netdev_priv(dev);
1310 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1311 u32 queue;
1312 int i;
1314 for (queue = 0; queue < tx_queue_cnt; queue++) {
1315 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1317 netif_dbg(priv, probe, priv->dev,
1318 "(%s) dma_tx_phy=0x%08x\n", __func__,
1319 (u32)tx_q->dma_tx_phy);
1321 /* Setup the chained descriptor addresses */
1322 if (priv->mode == STMMAC_CHAIN_MODE) {
1323 if (priv->extend_desc)
1324 priv->hw->mode->init(tx_q->dma_etx,
1325 tx_q->dma_tx_phy,
1326 DMA_TX_SIZE, 1);
1327 else
1328 priv->hw->mode->init(tx_q->dma_tx,
1329 tx_q->dma_tx_phy,
1330 DMA_TX_SIZE, 0);
1333 for (i = 0; i < DMA_TX_SIZE; i++) {
1334 struct dma_desc *p;
1335 if (priv->extend_desc)
1336 p = &((tx_q->dma_etx + i)->basic);
1337 else
1338 p = tx_q->dma_tx + i;
1340 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1341 p->des0 = 0;
1342 p->des1 = 0;
1343 p->des2 = 0;
1344 p->des3 = 0;
1345 } else {
1346 p->des2 = 0;
1349 tx_q->tx_skbuff_dma[i].buf = 0;
1350 tx_q->tx_skbuff_dma[i].map_as_page = false;
1351 tx_q->tx_skbuff_dma[i].len = 0;
1352 tx_q->tx_skbuff_dma[i].last_segment = false;
1353 tx_q->tx_skbuff[i] = NULL;
1356 tx_q->dirty_tx = 0;
1357 tx_q->cur_tx = 0;
1359 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1362 return 0;
1366 * init_dma_desc_rings - init the RX/TX descriptor rings
1367 * @dev: net device structure
1368 * @flags: gfp flag.
1369 * Description: this function initializes the DMA RX/TX descriptors
1370 * and allocates the socket buffers. It supports the chained and ring
1371 * modes.
1373 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1375 struct stmmac_priv *priv = netdev_priv(dev);
1376 int ret;
1378 ret = init_dma_rx_desc_rings(dev, flags);
1379 if (ret)
1380 return ret;
1382 ret = init_dma_tx_desc_rings(dev);
1384 stmmac_clear_descriptors(priv);
1386 if (netif_msg_hw(priv))
1387 stmmac_display_rings(priv);
1389 return ret;
1393 * dma_free_rx_skbufs - free RX dma buffers
1394 * @priv: private structure
1395 * @queue: RX queue index
1397 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1399 int i;
1401 for (i = 0; i < DMA_RX_SIZE; i++)
1402 stmmac_free_rx_buffer(priv, queue, i);
1406 * dma_free_tx_skbufs - free TX dma buffers
1407 * @priv: private structure
1408 * @queue: TX queue index
1410 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1412 int i;
1414 for (i = 0; i < DMA_TX_SIZE; i++)
1415 stmmac_free_tx_buffer(priv, queue, i);
1419 * free_dma_rx_desc_resources - free RX dma desc resources
1420 * @priv: private structure
1422 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1424 u32 rx_count = priv->plat->rx_queues_to_use;
1425 u32 queue;
1427 /* Free RX queue resources */
1428 for (queue = 0; queue < rx_count; queue++) {
1429 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1431 /* Release the DMA RX socket buffers */
1432 dma_free_rx_skbufs(priv, queue);
1434 /* Free DMA regions of consistent memory previously allocated */
1435 if (!priv->extend_desc)
1436 dma_free_coherent(priv->device,
1437 DMA_RX_SIZE * sizeof(struct dma_desc),
1438 rx_q->dma_rx, rx_q->dma_rx_phy);
1439 else
1440 dma_free_coherent(priv->device, DMA_RX_SIZE *
1441 sizeof(struct dma_extended_desc),
1442 rx_q->dma_erx, rx_q->dma_rx_phy);
1444 kfree(rx_q->rx_skbuff_dma);
1445 kfree(rx_q->rx_skbuff);
1450 * free_dma_tx_desc_resources - free TX dma desc resources
1451 * @priv: private structure
1453 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1455 u32 tx_count = priv->plat->tx_queues_to_use;
1456 u32 queue;
1458 /* Free TX queue resources */
1459 for (queue = 0; queue < tx_count; queue++) {
1460 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1462 /* Release the DMA TX socket buffers */
1463 dma_free_tx_skbufs(priv, queue);
1465 /* Free DMA regions of consistent memory previously allocated */
1466 if (!priv->extend_desc)
1467 dma_free_coherent(priv->device,
1468 DMA_TX_SIZE * sizeof(struct dma_desc),
1469 tx_q->dma_tx, tx_q->dma_tx_phy);
1470 else
1471 dma_free_coherent(priv->device, DMA_TX_SIZE *
1472 sizeof(struct dma_extended_desc),
1473 tx_q->dma_etx, tx_q->dma_tx_phy);
1475 kfree(tx_q->tx_skbuff_dma);
1476 kfree(tx_q->tx_skbuff);
1481 * alloc_dma_rx_desc_resources - alloc RX resources.
1482 * @priv: private structure
1483  * Description: according to which descriptor can be used (extended or basic)
1484  * this function allocates the resources for the TX and RX paths. In case of
1485  * reception, for example, it pre-allocates the RX socket buffers in order to
1486  * allow the zero-copy mechanism.
1488 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1490 u32 rx_count = priv->plat->rx_queues_to_use;
1491 int ret = -ENOMEM;
1492 u32 queue;
1494 /* RX queues buffers and DMA */
1495 for (queue = 0; queue < rx_count; queue++) {
1496 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1498 rx_q->queue_index = queue;
1499 rx_q->priv_data = priv;
1501 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1502 sizeof(dma_addr_t),
1503 GFP_KERNEL);
1504 if (!rx_q->rx_skbuff_dma)
1505 goto err_dma;
1507 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1508 sizeof(struct sk_buff *),
1509 GFP_KERNEL);
1510 if (!rx_q->rx_skbuff)
1511 goto err_dma;
1513 if (priv->extend_desc) {
1514 rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1515 DMA_RX_SIZE *
1516 sizeof(struct
1517 dma_extended_desc),
1518 &rx_q->dma_rx_phy,
1519 GFP_KERNEL);
1520 if (!rx_q->dma_erx)
1521 goto err_dma;
1523 } else {
1524 rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1525 DMA_RX_SIZE *
1526 sizeof(struct
1527 dma_desc),
1528 &rx_q->dma_rx_phy,
1529 GFP_KERNEL);
1530 if (!rx_q->dma_rx)
1531 goto err_dma;
1535 return 0;
1537 err_dma:
1538 free_dma_rx_desc_resources(priv);
1540 return ret;
1544 * alloc_dma_tx_desc_resources - alloc TX resources.
1545 * @priv: private structure
1546  * Description: according to which descriptor can be used (extended or basic)
1547  * this function allocates the resources for the TX and RX paths. In case of
1548  * reception, for example, it pre-allocates the RX socket buffers in order to
1549  * allow the zero-copy mechanism.
1551 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1553 u32 tx_count = priv->plat->tx_queues_to_use;
1554 int ret = -ENOMEM;
1555 u32 queue;
1557 /* TX queues buffers and DMA */
1558 for (queue = 0; queue < tx_count; queue++) {
1559 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1561 tx_q->queue_index = queue;
1562 tx_q->priv_data = priv;
1564 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1565 sizeof(*tx_q->tx_skbuff_dma),
1566 GFP_KERNEL);
1567 if (!tx_q->tx_skbuff_dma)
1568 goto err_dma;
1570 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1571 sizeof(struct sk_buff *),
1572 GFP_KERNEL);
1573 if (!tx_q->tx_skbuff)
1574 goto err_dma;
1576 if (priv->extend_desc) {
1577 tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1578 DMA_TX_SIZE *
1579 sizeof(struct
1580 dma_extended_desc),
1581 &tx_q->dma_tx_phy,
1582 GFP_KERNEL);
1583 if (!tx_q->dma_etx)
1584 goto err_dma;
1585 } else {
1586 tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1587 DMA_TX_SIZE *
1588 sizeof(struct
1589 dma_desc),
1590 &tx_q->dma_tx_phy,
1591 GFP_KERNEL);
1592 if (!tx_q->dma_tx)
1593 goto err_dma;
1597 return 0;
1599 err_dma:
1600 free_dma_tx_desc_resources(priv);
1602 return ret;
1606 * alloc_dma_desc_resources - alloc TX/RX resources.
1607 * @priv: private structure
1608  * Description: according to which descriptor can be used (extended or basic)
1609  * this function allocates the resources for the TX and RX paths. In case of
1610  * reception, for example, it pre-allocates the RX socket buffers in order to
1611  * allow the zero-copy mechanism.
1613 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1615 /* RX Allocation */
1616 int ret = alloc_dma_rx_desc_resources(priv);
1618 if (ret)
1619 return ret;
1621 ret = alloc_dma_tx_desc_resources(priv);
1623 return ret;
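/* Lifecycle note (illustrative, assumed from the helpers above): the usual
 * pairing is alloc_dma_desc_resources() followed by init_dma_desc_rings()
 * when the interface is brought up, and free_dma_desc_resources() on the
 * error/teardown path, which also unmaps and frees the socket buffers.
 */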
1627 * free_dma_desc_resources - free dma desc resources
1628 * @priv: private structure
1630 static void free_dma_desc_resources(struct stmmac_priv *priv)
1632 /* Release the DMA RX socket buffers */
1633 free_dma_rx_desc_resources(priv);
1635 /* Release the DMA TX socket buffers */
1636 free_dma_tx_desc_resources(priv);
1640 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1641 * @priv: driver private structure
1642 * Description: It is used for enabling the rx queues in the MAC
1644 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1646 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1647 int queue;
1648 u8 mode;
1650 for (queue = 0; queue < rx_queues_count; queue++) {
1651 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1652 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1657 * stmmac_start_rx_dma - start RX DMA channel
1658 * @priv: driver private structure
1659 * @chan: RX channel index
1660 * Description:
1661 * This starts a RX DMA channel
1663 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1665 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1666 priv->hw->dma->start_rx(priv->ioaddr, chan);
1670 * stmmac_start_tx_dma - start TX DMA channel
1671 * @priv: driver private structure
1672 * @chan: TX channel index
1673 * Description:
1674 * This starts a TX DMA channel
1676 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1678 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1679 priv->hw->dma->start_tx(priv->ioaddr, chan);
1683 * stmmac_stop_rx_dma - stop RX DMA channel
1684 * @priv: driver private structure
1685 * @chan: RX channel index
1686 * Description:
1687 * This stops a RX DMA channel
1689 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1691 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1692 priv->hw->dma->stop_rx(priv->ioaddr, chan);
1696 * stmmac_stop_tx_dma - stop TX DMA channel
1697 * @priv: driver private structure
1698 * @chan: TX channel index
1699 * Description:
1700 * This stops a TX DMA channel
1702 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1704 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1705 priv->hw->dma->stop_tx(priv->ioaddr, chan);
1709 * stmmac_start_all_dma - start all RX and TX DMA channels
1710 * @priv: driver private structure
1711 * Description:
1712 * This starts all the RX and TX DMA channels
1714 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1716 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1717 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1718 u32 chan = 0;
1720 for (chan = 0; chan < rx_channels_count; chan++)
1721 stmmac_start_rx_dma(priv, chan);
1723 for (chan = 0; chan < tx_channels_count; chan++)
1724 stmmac_start_tx_dma(priv, chan);
1728 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1729 * @priv: driver private structure
1730 * Description:
1731 * This stops the RX and TX DMA channels
1733 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1735 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1736 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1737 u32 chan = 0;
1739 for (chan = 0; chan < rx_channels_count; chan++)
1740 stmmac_stop_rx_dma(priv, chan);
1742 for (chan = 0; chan < tx_channels_count; chan++)
1743 stmmac_stop_tx_dma(priv, chan);
1747 * stmmac_dma_operation_mode - HW DMA operation mode
1748 * @priv: driver private structure
1749 * Description: it is used for configuring the DMA operation mode register in
1750 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1752 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1754 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1755 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1756 int rxfifosz = priv->plat->rx_fifo_size;
1757 int txfifosz = priv->plat->tx_fifo_size;
1758 u32 txmode = 0;
1759 u32 rxmode = 0;
1760 u32 chan = 0;
1761 u8 qmode = 0;
1763 if (rxfifosz == 0)
1764 rxfifosz = priv->dma_cap.rx_fifo_size;
1765 if (txfifosz == 0)
1766 txfifosz = priv->dma_cap.tx_fifo_size;
1768 /* Adjust for real per queue fifo size */
1769 rxfifosz /= rx_channels_count;
1770 txfifosz /= tx_channels_count;
1772 if (priv->plat->force_thresh_dma_mode) {
1773 txmode = tc;
1774 rxmode = tc;
1775 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1776  /*
1777  * In case of GMAC, SF mode can be enabled
1778  * to perform the TX COE in HW. This depends on:
1779  * 1) TX COE, if actually supported;
1780  * 2) there being no bugged Jumbo frame support
1781  * that requires not inserting the csum in the TDES.
1782  */
1783 txmode = SF_DMA_MODE;
1784 rxmode = SF_DMA_MODE;
1785 priv->xstats.threshold = SF_DMA_MODE;
1786 } else {
1787 txmode = tc;
1788 rxmode = SF_DMA_MODE;
1791 /* configure all channels */
1792 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1793 for (chan = 0; chan < rx_channels_count; chan++) {
1794 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1796 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1797 rxfifosz, qmode);
1800 for (chan = 0; chan < tx_channels_count; chan++) {
1801 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1803 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1804 txfifosz, qmode);
1806 } else {
1807 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1808 rxfifosz);
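/* Summary of the selection above (illustrative):
 *   force_thresh_dma_mode        -> TX and RX both in threshold mode (tc)
 *   force_sf_dma_mode or tx_coe  -> TX and RX both in Store-And-Forward mode
 *   otherwise                    -> TX threshold (tc), RX Store-And-Forward
 * On DWMAC >= 4.00 cores the mode is then programmed per channel together
 * with the per-queue FIFO share computed above.
 */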
1813 * stmmac_tx_clean - to manage the transmission completion
1814 * @priv: driver private structure
1815 * @queue: TX queue index
1816 * Description: it reclaims the transmit resources after transmission completes.
1818 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1820 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1821 unsigned int bytes_compl = 0, pkts_compl = 0;
1822 unsigned int entry;
1824 netif_tx_lock(priv->dev);
1826 priv->xstats.tx_clean++;
1828 entry = tx_q->dirty_tx;
1829 while (entry != tx_q->cur_tx) {
1830 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1831 struct dma_desc *p;
1832 int status;
1834 if (priv->extend_desc)
1835 p = (struct dma_desc *)(tx_q->dma_etx + entry);
1836 else
1837 p = tx_q->dma_tx + entry;
1839 status = priv->hw->desc->tx_status(&priv->dev->stats,
1840 &priv->xstats, p,
1841 priv->ioaddr);
1842 /* Check if the descriptor is owned by the DMA */
1843 if (unlikely(status & tx_dma_own))
1844 break;
1846 /* Just consider the last segment and ...*/
1847 if (likely(!(status & tx_not_ls))) {
1848 /* ... verify the status error condition */
1849 if (unlikely(status & tx_err)) {
1850 priv->dev->stats.tx_errors++;
1851 } else {
1852 priv->dev->stats.tx_packets++;
1853 priv->xstats.tx_pkt_n++;
1855 stmmac_get_tx_hwtstamp(priv, p, skb);
1858 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1859 if (tx_q->tx_skbuff_dma[entry].map_as_page)
1860 dma_unmap_page(priv->device,
1861 tx_q->tx_skbuff_dma[entry].buf,
1862 tx_q->tx_skbuff_dma[entry].len,
1863 DMA_TO_DEVICE);
1864 else
1865 dma_unmap_single(priv->device,
1866 tx_q->tx_skbuff_dma[entry].buf,
1867 tx_q->tx_skbuff_dma[entry].len,
1868 DMA_TO_DEVICE);
1869 tx_q->tx_skbuff_dma[entry].buf = 0;
1870 tx_q->tx_skbuff_dma[entry].len = 0;
1871 tx_q->tx_skbuff_dma[entry].map_as_page = false;
1874 if (priv->hw->mode->clean_desc3)
1875 priv->hw->mode->clean_desc3(tx_q, p);
1877 tx_q->tx_skbuff_dma[entry].last_segment = false;
1878 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1880 if (likely(skb != NULL)) {
1881 pkts_compl++;
1882 bytes_compl += skb->len;
1883 dev_consume_skb_any(skb);
1884 tx_q->tx_skbuff[entry] = NULL;
1887 priv->hw->desc->release_tx_desc(p, priv->mode);
1889 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1891 tx_q->dirty_tx = entry;
1893 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1894 pkts_compl, bytes_compl);
1896 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1897 queue))) &&
1898 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1900 netif_dbg(priv, tx_done, priv->dev,
1901 "%s: restart transmit\n", __func__);
1902 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1905 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1906 stmmac_enable_eee_mode(priv);
1907 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1909 netif_tx_unlock(priv->dev);
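/* Note (illustrative): the queue is only woken once more than
 * STMMAC_TX_THRESH (DMA_TX_SIZE / 4) descriptors are free again, which avoids
 * bouncing the queue between the stopped and running states; the
 * pkts_compl/bytes_compl totals feed netdev_tx_completed_queue() for BQL.
 */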
1912 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1914 priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1917 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1919 priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1923 * stmmac_tx_err - to manage the tx error
1924 * @priv: driver private structure
1925 * @chan: channel index
1926 * Description: it cleans the descriptors and restarts the transmission
1927 * in case of transmission errors.
1929 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1931 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1932 int i;
1934 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1936 stmmac_stop_tx_dma(priv, chan);
1937 dma_free_tx_skbufs(priv, chan);
1938 for (i = 0; i < DMA_TX_SIZE; i++)
1939 if (priv->extend_desc)
1940 priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1941 priv->mode,
1942 (i == DMA_TX_SIZE - 1));
1943 else
1944 priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1945 priv->mode,
1946 (i == DMA_TX_SIZE - 1));
1947 tx_q->dirty_tx = 0;
1948 tx_q->cur_tx = 0;
1949 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1950 stmmac_start_tx_dma(priv, chan);
1952 priv->dev->stats.tx_errors++;
1953 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1957 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1958 * @priv: driver private structure
1959 * @txmode: TX operating mode
1960 * @rxmode: RX operating mode
1961 * @chan: channel index
1962 * Description: it is used to configure the DMA operation mode at runtime
1963 * in order to program the tx/rx DMA thresholds or Store-And-Forward
1964 * mode.
1966 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1967 u32 rxmode, u32 chan)
1969 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1970 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1971 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1972 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1973 int rxfifosz = priv->plat->rx_fifo_size;
1974 int txfifosz = priv->plat->tx_fifo_size;
1976 if (rxfifosz == 0)
1977 rxfifosz = priv->dma_cap.rx_fifo_size;
1978 if (txfifosz == 0)
1979 txfifosz = priv->dma_cap.tx_fifo_size;
1981 /* Adjust for real per queue fifo size */
1982 rxfifosz /= rx_channels_count;
1983 txfifosz /= tx_channels_count;
1985 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1986 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1987 rxfifosz, rxqmode);
1988 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1989 txfifosz, txqmode);
1990 } else {
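/* Cores older than 4.00 expose a single operation mode register, so both
 * directions are programmed with one call and the per-queue TX FIFO split
 * above is not used here.
 */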
1991 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1992 rxfifosz);
1997 * stmmac_dma_interrupt - DMA ISR
1998 * @priv: driver private structure
1999 * Description: this is the DMA ISR. It is called by the main ISR.
2000 * It calls the dwmac dma routine and schedules the poll method in case
2001 * some work can be done.
2003 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2005 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2006 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2007 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2008 tx_channel_count : rx_channel_count;
2009 u32 chan;
2010 bool poll_scheduled = false;
2011 int status[channels_to_check];
2013 /* Each DMA channel can be used for rx and tx simultaneously, yet
2014 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2015 * stmmac_channel struct.
2016 * Because of this, stmmac_poll currently checks (and possibly wakes)
2017 * all tx queues rather than just a single tx queue.
2019 for (chan = 0; chan < channels_to_check; chan++)
2020 status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
2021 &priv->xstats,
2022 chan);
2024 for (chan = 0; chan < rx_channel_count; chan++) {
2025 if (likely(status[chan] & handle_rx)) {
2026 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2028 if (likely(napi_schedule_prep(&rx_q->napi))) {
2029 stmmac_disable_dma_irq(priv, chan);
2030 __napi_schedule(&rx_q->napi);
2031 poll_scheduled = true;
2036 /* If we scheduled poll, we already know that tx queues will be checked.
2037 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2038 * completed transmission, if so, call stmmac_poll (once).
2040 if (!poll_scheduled) {
2041 for (chan = 0; chan < tx_channel_count; chan++) {
2042 if (status[chan] & handle_tx) {
2043 /* It doesn't matter what rx queue we choose
2044 * here. We use 0 since it always exists.
2046 struct stmmac_rx_queue *rx_q =
2047 &priv->rx_queue[0];
2049 if (likely(napi_schedule_prep(&rx_q->napi))) {
2050 stmmac_disable_dma_irq(priv, chan);
2051 __napi_schedule(&rx_q->napi);
2053 break;
2058 for (chan = 0; chan < tx_channel_count; chan++) {
2059 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2060 /* Try to bump up the dma threshold on this failure */
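/* The threshold is raised in steps of 64 while the DMA is not already in
 * Store-And-Forward mode and the current value is still <= 256.
 */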
2061 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2062 (tc <= 256)) {
2063 tc += 64;
2064 if (priv->plat->force_thresh_dma_mode)
2065 stmmac_set_dma_operation_mode(priv,
2066 tc,
2067 tc,
2068 chan);
2069 else
2070 stmmac_set_dma_operation_mode(priv,
2071 tc,
2072 SF_DMA_MODE,
2073 chan);
2074 priv->xstats.threshold = tc;
2076 } else if (unlikely(status[chan] == tx_hard_error)) {
2077 stmmac_tx_err(priv, chan);
2083 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2084 * @priv: driver private structure
2085 * Description: this masks the MMC irq since the counters are managed in SW.
2087 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2089 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2090 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2092 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2093 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2094 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2095 } else {
2096 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2097 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2100 dwmac_mmc_intr_all_mask(priv->mmcaddr);
2102 if (priv->dma_cap.rmon) {
2103 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2104 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2105 } else
2106 netdev_info(priv->dev, "No MAC Management Counters available\n");
2110 * stmmac_selec_desc_mode - to select among: normal/alternate/extended descriptors
2111 * @priv: driver private structure
2112 * Description: select the Enhanced/Alternate or Normal descriptors.
2113 * In case of Enhanced/Alternate, it checks if the extended descriptors are
2114 * supported by the HW capability register.
2116 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2118 if (priv->plat->enh_desc) {
2119 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2121 /* GMAC older than 3.50 has no extended descriptors */
2122 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2123 dev_info(priv->device, "Enabled extended descriptors\n");
2124 priv->extend_desc = 1;
2125 } else
2126 dev_warn(priv->device, "Extended descriptors not supported\n");
2128 priv->hw->desc = &enh_desc_ops;
2129 } else {
2130 dev_info(priv->device, "Normal descriptors\n");
2131 priv->hw->desc = &ndesc_ops;
2136 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2137 * @priv: driver private structure
2138 * Description:
2139 * newer GMAC chip generations have a register to indicate the
2140 * presence of the optional features/functions.
2141 * This can also be used to override the value passed through the
2142 * platform code, which is necessary for old MAC10/100 and GMAC chips.
2144 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2146 u32 ret = 0;
2148 if (priv->hw->dma->get_hw_feature) {
2149 priv->hw->dma->get_hw_feature(priv->ioaddr,
2150 &priv->dma_cap);
2151 ret = 1;
2154 return ret;
2158 * stmmac_check_ether_addr - check if the MAC addr is valid
2159 * @priv: driver private structure
2160 * Description:
2161 * it verifies that the MAC address is valid; in case of failure it
2162 * generates a random MAC address
2164 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2166 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2167 priv->hw->mac->get_umac_addr(priv->hw,
2168 priv->dev->dev_addr, 0);
2169 if (!is_valid_ether_addr(priv->dev->dev_addr))
2170 eth_hw_addr_random(priv->dev);
2171 netdev_info(priv->dev, "device MAC address %pM\n",
2172 priv->dev->dev_addr);
2177 * stmmac_init_dma_engine - DMA init.
2178 * @priv: driver private structure
2179 * Description:
2180 * It initializes the DMA invoking the specific MAC/GMAC callback.
2181 * Some DMA parameters can be passed from the platform;
2182 * if these are not passed, a default is kept for the MAC or GMAC.
2184 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2186 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2187 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2188 struct stmmac_rx_queue *rx_q;
2189 struct stmmac_tx_queue *tx_q;
2190 u32 dummy_dma_rx_phy = 0;
2191 u32 dummy_dma_tx_phy = 0;
2192 u32 chan = 0;
2193 int atds = 0;
2194 int ret = 0;
2196 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2197 dev_err(priv->device, "Invalid DMA configuration\n");
2198 return -EINVAL;
2201 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2202 atds = 1;
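/* atds selects the Alternate (extended) Descriptor Size in the DMA setup;
 * it is only needed when extended descriptors are used in ring mode.
 */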
2204 ret = priv->hw->dma->reset(priv->ioaddr);
2205 if (ret) {
2206 dev_err(priv->device, "Failed to reset the dma\n");
2207 return ret;
2210 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2211 /* DMA Configuration */
2212 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2213 dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2215 /* DMA RX Channel Configuration */
2216 for (chan = 0; chan < rx_channels_count; chan++) {
2217 rx_q = &priv->rx_queue[chan];
2219 priv->hw->dma->init_rx_chan(priv->ioaddr,
2220 priv->plat->dma_cfg,
2221 rx_q->dma_rx_phy, chan);
2223 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2224 (DMA_RX_SIZE * sizeof(struct dma_desc));
2225 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2226 rx_q->rx_tail_addr,
2227 chan);
2230 /* DMA TX Channel Configuration */
2231 for (chan = 0; chan < tx_channels_count; chan++) {
2232 tx_q = &priv->tx_queue[chan];
2234 priv->hw->dma->init_chan(priv->ioaddr,
2235 priv->plat->dma_cfg,
2236 chan);
2238 priv->hw->dma->init_tx_chan(priv->ioaddr,
2239 priv->plat->dma_cfg,
2240 tx_q->dma_tx_phy, chan);
2242 tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2243 (DMA_TX_SIZE * sizeof(struct dma_desc));
2244 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2245 tx_q->tx_tail_addr,
2246 chan);
2248 } else {
2249 rx_q = &priv->rx_queue[chan];
2250 tx_q = &priv->tx_queue[chan];
2251 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2252 tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2255 if (priv->plat->axi && priv->hw->dma->axi)
2256 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2258 return ret;
2262 * stmmac_tx_timer - mitigation sw timer for tx.
2263 * @t: pointer to the timer_list
2264 * Description:
2265 * This is the timer handler to directly invoke the stmmac_tx_clean.
2267 static void stmmac_tx_timer(struct timer_list *t)
2269 struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2270 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2271 u32 queue;
2273 /* let's scan all the tx queues */
2274 for (queue = 0; queue < tx_queues_count; queue++)
2275 stmmac_tx_clean(priv, queue);
2279 * stmmac_init_tx_coalesce - init tx mitigation options.
2280 * @priv: driver private structure
2281 * Description:
2282 * This inits the transmit coalesce parameters: i.e. timer rate,
2283 * timer handler and default threshold used for enabling the
2284 * interrupt on completion bit.
2286 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2288 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2289 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2290 timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2291 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2292 add_timer(&priv->txtimer);
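/* This timer complements tx_coal_frames: the xmit path requests a TX
 * completion interrupt only every tx_coal_frames packets, and the timer
 * cleans up descriptors that complete in between.
 */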
2295 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2297 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2298 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2299 u32 chan;
2301 /* set TX ring length */
2302 if (priv->hw->dma->set_tx_ring_len) {
2303 for (chan = 0; chan < tx_channels_count; chan++)
2304 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2305 (DMA_TX_SIZE - 1), chan);
2308 /* set RX ring length */
2309 if (priv->hw->dma->set_rx_ring_len) {
2310 for (chan = 0; chan < rx_channels_count; chan++)
2311 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2312 (DMA_RX_SIZE - 1), chan);
2317 * stmmac_set_tx_queue_weight - Set TX queue weight
2318 * @priv: driver private structure
2319 * Description: It is used for setting the TX queue weights
2321 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2323 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2324 u32 weight;
2325 u32 queue;
2327 for (queue = 0; queue < tx_queues_count; queue++) {
2328 weight = priv->plat->tx_queues_cfg[queue].weight;
2329 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2334 * stmmac_configure_cbs - Configure CBS in TX queue
2335 * @priv: driver private structure
2336 * Description: It is used for configuring CBS in AVB TX queues
2338 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2340 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2341 u32 mode_to_use;
2342 u32 queue;
2344 /* queue 0 is reserved for legacy traffic */
2345 for (queue = 1; queue < tx_queues_count; queue++) {
2346 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2347 if (mode_to_use == MTL_QUEUE_DCB)
2348 continue;
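/* Program the Credit Based Shaper (IEEE 802.1Qav) for this AVB queue with
 * the slopes and credits taken from the per-queue platform data.
 */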
2350 priv->hw->mac->config_cbs(priv->hw,
2351 priv->plat->tx_queues_cfg[queue].send_slope,
2352 priv->plat->tx_queues_cfg[queue].idle_slope,
2353 priv->plat->tx_queues_cfg[queue].high_credit,
2354 priv->plat->tx_queues_cfg[queue].low_credit,
2355 queue);
2360 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2361 * @priv: driver private structure
2362 * Description: It is used for mapping RX queues to RX dma channels
2364 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2366 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2367 u32 queue;
2368 u32 chan;
2370 for (queue = 0; queue < rx_queues_count; queue++) {
2371 chan = priv->plat->rx_queues_cfg[queue].chan;
2372 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2377 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2378 * @priv: driver private structure
2379 * Description: It is used for configuring the RX Queue Priority
2381 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2383 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2384 u32 queue;
2385 u32 prio;
2387 for (queue = 0; queue < rx_queues_count; queue++) {
2388 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2389 continue;
2391 prio = priv->plat->rx_queues_cfg[queue].prio;
2392 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2397 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2398 * @priv: driver private structure
2399 * Description: It is used for configuring the TX Queue Priority
2401 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2403 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2404 u32 queue;
2405 u32 prio;
2407 for (queue = 0; queue < tx_queues_count; queue++) {
2408 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2409 continue;
2411 prio = priv->plat->tx_queues_cfg[queue].prio;
2412 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2417 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2418 * @priv: driver private structure
2419 * Description: It is used for configuring the RX queue routing
2421 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2423 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2424 u32 queue;
2425 u8 packet;
2427 for (queue = 0; queue < rx_queues_count; queue++) {
2428 /* no specific packet type routing specified for the queue */
2429 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2430 continue;
2432 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2433 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2438 * stmmac_mtl_configuration - Configure MTL
2439 * @priv: driver private structure
2440 * Description: It is used for configuring MTL
2442 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2444 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2445 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2447 if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2448 stmmac_set_tx_queue_weight(priv);
2450 /* Configure MTL RX algorithms */
2451 if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2452 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2453 priv->plat->rx_sched_algorithm);
2455 /* Configure MTL TX algorithms */
2456 if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2457 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2458 priv->plat->tx_sched_algorithm);
2460 /* Configure CBS in AVB TX queues */
2461 if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2462 stmmac_configure_cbs(priv);
2464 /* Map RX MTL to DMA channels */
2465 if (priv->hw->mac->map_mtl_to_dma)
2466 stmmac_rx_queue_dma_chan_map(priv);
2468 /* Enable MAC RX Queues */
2469 if (priv->hw->mac->rx_queue_enable)
2470 stmmac_mac_enable_rx_queues(priv);
2472 /* Set RX priorities */
2473 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2474 stmmac_mac_config_rx_queues_prio(priv);
2476 /* Set TX priorities */
2477 if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2478 stmmac_mac_config_tx_queues_prio(priv);
2480 /* Set RX routing */
2481 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2482 stmmac_mac_config_rx_queues_routing(priv);
2486 * stmmac_hw_setup - setup mac in a usable state.
2487 * @dev : pointer to the device structure.
2488 * Description:
2489 * this is the main function to setup the HW in a usable state: the
2490 * dma engine is reset, the core registers are configured (e.g. AXI,
2491 * Checksum features, timers) and the DMA is made ready to start receiving
2492 * and transmitting.
2493 * Return value:
2494 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2495 * file on failure.
2497 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2499 struct stmmac_priv *priv = netdev_priv(dev);
2500 u32 rx_cnt = priv->plat->rx_queues_to_use;
2501 u32 tx_cnt = priv->plat->tx_queues_to_use;
2502 u32 chan;
2503 int ret;
2505 /* DMA initialization and SW reset */
2506 ret = stmmac_init_dma_engine(priv);
2507 if (ret < 0) {
2508 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2509 __func__);
2510 return ret;
2513 /* Copy the MAC addr into the HW */
2514 priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2516 /* PS and related bits will be programmed according to the speed */
2517 if (priv->hw->pcs) {
2518 int speed = priv->plat->mac_port_sel_speed;
2520 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2521 (speed == SPEED_1000)) {
2522 priv->hw->ps = speed;
2523 } else {
2524 dev_warn(priv->device, "invalid port speed\n");
2525 priv->hw->ps = 0;
2529 /* Initialize the MAC Core */
2530 priv->hw->mac->core_init(priv->hw, dev);
2532 /* Initialize MTL*/
2533 if (priv->synopsys_id >= DWMAC_CORE_4_00)
2534 stmmac_mtl_configuration(priv);
2536 ret = priv->hw->mac->rx_ipc(priv->hw);
2537 if (!ret) {
2538 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2539 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2540 priv->hw->rx_csum = 0;
2543 /* Enable the MAC Rx/Tx */
2544 priv->hw->mac->set_mac(priv->ioaddr, true);
2546 /* Set the HW DMA mode and the COE */
2547 stmmac_dma_operation_mode(priv);
2549 stmmac_mmc_setup(priv);
2551 if (init_ptp) {
2552 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2553 if (ret < 0)
2554 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2556 ret = stmmac_init_ptp(priv);
2557 if (ret == -EOPNOTSUPP)
2558 netdev_warn(priv->dev, "PTP not supported by HW\n");
2559 else if (ret)
2560 netdev_warn(priv->dev, "PTP init failed\n");
2563 #ifdef CONFIG_DEBUG_FS
2564 ret = stmmac_init_fs(dev);
2565 if (ret < 0)
2566 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2567 __func__);
2568 #endif
2569 /* Start the ball rolling... */
2570 stmmac_start_all_dma(priv);
2572 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2574 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2575 priv->rx_riwt = MAX_DMA_RIWT;
2576 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2579 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2580 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2582 /* set TX and RX rings length */
2583 stmmac_set_rings_length(priv);
2585 /* Enable TSO */
2586 if (priv->tso) {
2587 for (chan = 0; chan < tx_cnt; chan++)
2588 priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2591 return 0;
2594 static void stmmac_hw_teardown(struct net_device *dev)
2596 struct stmmac_priv *priv = netdev_priv(dev);
2598 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2602 * stmmac_open - open entry point of the driver
2603 * @dev : pointer to the device structure.
2604 * Description:
2605 * This function is the open entry point of the driver.
2606 * Return value:
2607 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2608 * file on failure.
2610 static int stmmac_open(struct net_device *dev)
2612 struct stmmac_priv *priv = netdev_priv(dev);
2613 int ret;
2615 stmmac_check_ether_addr(priv);
2617 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2618 priv->hw->pcs != STMMAC_PCS_TBI &&
2619 priv->hw->pcs != STMMAC_PCS_RTBI) {
2620 ret = stmmac_init_phy(dev);
2621 if (ret) {
2622 netdev_err(priv->dev,
2623 "%s: Cannot attach to PHY (error: %d)\n",
2624 __func__, ret);
2625 return ret;
2629 /* Extra statistics */
2630 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2631 priv->xstats.threshold = tc;
2633 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
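/* Frames shorter than rx_copybreak are copied into a freshly allocated
 * skb on receive so the pre-mapped DMA buffer can be reused immediately
 * (see stmmac_rx()).
 */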
2634 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2635 priv->mss = 0;
2637 ret = alloc_dma_desc_resources(priv);
2638 if (ret < 0) {
2639 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2640 __func__);
2641 goto dma_desc_error;
2644 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2645 if (ret < 0) {
2646 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2647 __func__);
2648 goto init_error;
2651 ret = stmmac_hw_setup(dev, true);
2652 if (ret < 0) {
2653 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2654 goto init_error;
2657 stmmac_init_tx_coalesce(priv);
2659 if (dev->phydev)
2660 phy_start(dev->phydev);
2662 /* Request the IRQ lines */
2663 ret = request_irq(dev->irq, stmmac_interrupt,
2664 IRQF_SHARED, dev->name, dev);
2665 if (unlikely(ret < 0)) {
2666 netdev_err(priv->dev,
2667 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2668 __func__, dev->irq, ret);
2669 goto irq_error;
2672 /* Request the Wake IRQ in case of another line is used for WoL */
2673 if (priv->wol_irq != dev->irq) {
2674 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2675 IRQF_SHARED, dev->name, dev);
2676 if (unlikely(ret < 0)) {
2677 netdev_err(priv->dev,
2678 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2679 __func__, priv->wol_irq, ret);
2680 goto wolirq_error;
2684 /* Request the IRQ lines */
2685 if (priv->lpi_irq > 0) {
2686 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2687 dev->name, dev);
2688 if (unlikely(ret < 0)) {
2689 netdev_err(priv->dev,
2690 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2691 __func__, priv->lpi_irq, ret);
2692 goto lpiirq_error;
2696 stmmac_enable_all_queues(priv);
2697 stmmac_start_all_queues(priv);
2699 return 0;
2701 lpiirq_error:
2702 if (priv->wol_irq != dev->irq)
2703 free_irq(priv->wol_irq, dev);
2704 wolirq_error:
2705 free_irq(dev->irq, dev);
2706 irq_error:
2707 if (dev->phydev)
2708 phy_stop(dev->phydev);
2710 del_timer_sync(&priv->txtimer);
2711 stmmac_hw_teardown(dev);
2712 init_error:
2713 free_dma_desc_resources(priv);
2714 dma_desc_error:
2715 if (dev->phydev)
2716 phy_disconnect(dev->phydev);
2718 return ret;
2722 * stmmac_release - close entry point of the driver
2723 * @dev : device pointer.
2724 * Description:
2725 * This is the stop entry point of the driver.
2727 static int stmmac_release(struct net_device *dev)
2729 struct stmmac_priv *priv = netdev_priv(dev);
2731 if (priv->eee_enabled)
2732 del_timer_sync(&priv->eee_ctrl_timer);
2734 /* Stop and disconnect the PHY */
2735 if (dev->phydev) {
2736 phy_stop(dev->phydev);
2737 phy_disconnect(dev->phydev);
2740 stmmac_stop_all_queues(priv);
2742 stmmac_disable_all_queues(priv);
2744 del_timer_sync(&priv->txtimer);
2746 /* Free the IRQ lines */
2747 free_irq(dev->irq, dev);
2748 if (priv->wol_irq != dev->irq)
2749 free_irq(priv->wol_irq, dev);
2750 if (priv->lpi_irq > 0)
2751 free_irq(priv->lpi_irq, dev);
2753 /* Stop TX/RX DMA and clear the descriptors */
2754 stmmac_stop_all_dma(priv);
2756 /* Release and free the Rx/Tx resources */
2757 free_dma_desc_resources(priv);
2759 /* Disable the MAC Rx/Tx */
2760 priv->hw->mac->set_mac(priv->ioaddr, false);
2762 netif_carrier_off(dev);
2764 #ifdef CONFIG_DEBUG_FS
2765 stmmac_exit_fs(dev);
2766 #endif
2768 stmmac_release_ptp(priv);
2770 return 0;
2774 * stmmac_tso_allocator - fill TX descriptors with the TSO payload
2775 * @priv: driver private structure
2776 * @des: buffer start address
2777 * @total_len: total length to fill in descriptors
2778 * @last_segment: condition for the last descriptor
2779 * @queue: TX queue index
2780 * Description:
2781 * This function fills descriptors and requests new descriptors according
2782 * to the buffer length to fill
2784 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2785 int total_len, bool last_segment, u32 queue)
2787 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2788 struct dma_desc *desc;
2789 u32 buff_size;
2790 int tmp_len;
2792 tmp_len = total_len;
2794 while (tmp_len > 0) {
2795 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2796 desc = tx_q->dma_tx + tx_q->cur_tx;
2798 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2799 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2800 TSO_MAX_BUFF_SIZE : tmp_len;
2802 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2803 0, 1,
2804 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2805 0, 0);
2807 tmp_len -= TSO_MAX_BUFF_SIZE;
2812 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2813 * @skb : the socket buffer
2814 * @dev : device pointer
2815 * Description: this is the transmit function that is called on TSO frames
2816 * (support available on GMAC4 and newer chips).
2817 * The diagram below shows the ring programming in case of TSO frames:
2819 * First Descriptor
2820 * --------
2821 * | DES0 |---> buffer1 = L2/L3/L4 header
2822 * | DES1 |---> TCP Payload (can continue on next descr...)
2823 * | DES2 |---> buffer 1 and 2 len
2824 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2825 * --------
2827 * ...
2829 * --------
2830 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2831 * | DES1 | --|
2832 * | DES2 | --> buffer 1 and 2 len
2833 * | DES3 |
2834 * --------
2836 * The MSS is fixed per TSO frame, so the TDES3 ctx field is only programmed when the MSS value changes.
2838 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2840 struct dma_desc *desc, *first, *mss_desc = NULL;
2841 struct stmmac_priv *priv = netdev_priv(dev);
2842 int nfrags = skb_shinfo(skb)->nr_frags;
2843 u32 queue = skb_get_queue_mapping(skb);
2844 unsigned int first_entry, des;
2845 struct stmmac_tx_queue *tx_q;
2846 int tmp_pay_len = 0;
2847 u32 pay_len, mss;
2848 u8 proto_hdr_len;
2849 int i;
2851 tx_q = &priv->tx_queue[queue];
2853 /* Compute header lengths */
2854 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2856 /* Desc availability based on threshold should be safe enough */
2857 if (unlikely(stmmac_tx_avail(priv, queue) <
2858 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2859 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2860 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2861 queue));
2862 /* This is a hard error, log it. */
2863 netdev_err(priv->dev,
2864 "%s: Tx Ring full when queue awake\n",
2865 __func__);
2867 return NETDEV_TX_BUSY;
2870 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2872 mss = skb_shinfo(skb)->gso_size;
2874 /* set new MSS value if needed */
2875 if (mss != priv->mss) {
2876 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2877 priv->hw->desc->set_mss(mss_desc, mss);
2878 priv->mss = mss;
2879 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
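/* The context descriptor carrying the new MSS takes a ring entry of its
 * own, hence cur_tx was advanced past it above.
 */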
2882 if (netif_msg_tx_queued(priv)) {
2883 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2884 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2885 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2886 skb->data_len);
2889 first_entry = tx_q->cur_tx;
2891 desc = tx_q->dma_tx + first_entry;
2892 first = desc;
2894 /* first descriptor: fill Headers on Buf1 */
2895 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2896 DMA_TO_DEVICE);
2897 if (dma_mapping_error(priv->device, des))
2898 goto dma_map_err;
2900 tx_q->tx_skbuff_dma[first_entry].buf = des;
2901 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2903 first->des0 = cpu_to_le32(des);
2905 /* Fill start of payload in buff2 of first descriptor */
2906 if (pay_len)
2907 first->des1 = cpu_to_le32(des + proto_hdr_len);
2909 /* If needed take extra descriptors to fill the remaining payload */
2910 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2912 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2914 /* Prepare fragments */
2915 for (i = 0; i < nfrags; i++) {
2916 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2918 des = skb_frag_dma_map(priv->device, frag, 0,
2919 skb_frag_size(frag),
2920 DMA_TO_DEVICE);
2921 if (dma_mapping_error(priv->device, des))
2922 goto dma_map_err;
2924 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2925 (i == nfrags - 1), queue);
2927 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2928 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2929 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2930 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2933 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2935 /* Only the last descriptor gets to point to the skb. */
2936 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2938 /* We've used all descriptors we need for this skb, however,
2939 * advance cur_tx so that it references a fresh descriptor.
2940 * ndo_start_xmit will fill this descriptor the next time it's
2941 * called and stmmac_tx_clean may clean up to this descriptor.
2943 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2945 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2946 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2947 __func__);
2948 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2951 dev->stats.tx_bytes += skb->len;
2952 priv->xstats.tx_tso_frames++;
2953 priv->xstats.tx_tso_nfrags += nfrags;
2955 /* Manage tx mitigation */
2956 priv->tx_count_frames += nfrags + 1;
2957 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2958 mod_timer(&priv->txtimer,
2959 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2960 } else {
2961 priv->tx_count_frames = 0;
2962 priv->hw->desc->set_tx_ic(desc);
2963 priv->xstats.tx_set_ic_bit++;
2966 skb_tx_timestamp(skb);
2968 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2969 priv->hwts_tx_en)) {
2970 /* declare that device is doing timestamping */
2971 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2972 priv->hw->desc->enable_tx_timestamp(first);
2975 /* Complete the first descriptor before granting the DMA */
2976 priv->hw->desc->prepare_tso_tx_desc(first, 1,
2977 proto_hdr_len,
2978 pay_len,
2979 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2980 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2982 /* If context desc is used to change MSS */
2983 if (mss_desc)
2984 priv->hw->desc->set_tx_owner(mss_desc);
2986 /* The own bit must be the latest setting done when prepare the
2987 * descriptor and then barrier is needed to make sure that
2988 * all is coherent before granting the DMA engine.
2990 dma_wmb();
2992 if (netif_msg_pktdata(priv)) {
2993 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2994 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2995 tx_q->cur_tx, first, nfrags);
2997 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
3000 pr_info(">>> frame to be transmitted: ");
3001 print_pkt(skb->data, skb_headlen(skb));
3004 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
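/* On GMAC4 the DMA is kicked by moving the TX channel tail pointer to
 * cover the newly prepared descriptors.
 */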
3006 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3007 queue);
3009 return NETDEV_TX_OK;
3011 dma_map_err:
3012 dev_err(priv->device, "Tx dma map failed\n");
3013 dev_kfree_skb(skb);
3014 priv->dev->stats.tx_dropped++;
3015 return NETDEV_TX_OK;
3019 * stmmac_xmit - Tx entry point of the driver
3020 * @skb : the socket buffer
3021 * @dev : device pointer
3022 * Description : this is the tx entry point of the driver.
3023 * It programs the chain or the ring and supports oversized frames
3024 * and SG feature.
3026 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3028 struct stmmac_priv *priv = netdev_priv(dev);
3029 unsigned int nopaged_len = skb_headlen(skb);
3030 int i, csum_insertion = 0, is_jumbo = 0;
3031 u32 queue = skb_get_queue_mapping(skb);
3032 int nfrags = skb_shinfo(skb)->nr_frags;
3033 int entry;
3034 unsigned int first_entry;
3035 struct dma_desc *desc, *first;
3036 struct stmmac_tx_queue *tx_q;
3037 unsigned int enh_desc;
3038 unsigned int des;
3040 tx_q = &priv->tx_queue[queue];
3042 /* Manage oversized TCP frames for GMAC4 device */
3043 if (skb_is_gso(skb) && priv->tso) {
3044 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3045 return stmmac_tso_xmit(skb, dev);
3048 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3049 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3050 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3051 queue));
3052 /* This is a hard error, log it. */
3053 netdev_err(priv->dev,
3054 "%s: Tx Ring full when queue awake\n",
3055 __func__);
3057 return NETDEV_TX_BUSY;
3060 if (priv->tx_path_in_lpi_mode)
3061 stmmac_disable_eee_mode(priv);
3063 entry = tx_q->cur_tx;
3064 first_entry = entry;
3066 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3068 if (likely(priv->extend_desc))
3069 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3070 else
3071 desc = tx_q->dma_tx + entry;
3073 first = desc;
3075 enh_desc = priv->plat->enh_desc;
3076 /* To program the descriptors according to the size of the frame */
3077 if (enh_desc)
3078 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3080 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3081 DWMAC_CORE_4_00)) {
3082 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3083 if (unlikely(entry < 0))
3084 goto dma_map_err;
3087 for (i = 0; i < nfrags; i++) {
3088 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3089 int len = skb_frag_size(frag);
3090 bool last_segment = (i == (nfrags - 1));
3092 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3094 if (likely(priv->extend_desc))
3095 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3096 else
3097 desc = tx_q->dma_tx + entry;
3099 des = skb_frag_dma_map(priv->device, frag, 0, len,
3100 DMA_TO_DEVICE);
3101 if (dma_mapping_error(priv->device, des))
3102 goto dma_map_err; /* should reuse desc w/o issues */
3104 tx_q->tx_skbuff[entry] = NULL;
3106 tx_q->tx_skbuff_dma[entry].buf = des;
3107 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3108 desc->des0 = cpu_to_le32(des);
3109 else
3110 desc->des2 = cpu_to_le32(des);
3112 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3113 tx_q->tx_skbuff_dma[entry].len = len;
3114 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3116 /* Prepare the descriptor and set the own bit too */
3117 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3118 priv->mode, 1, last_segment,
3119 skb->len);
3122 /* Only the last descriptor gets to point to the skb. */
3123 tx_q->tx_skbuff[entry] = skb;
3125 /* We've used all descriptors we need for this skb, however,
3126 * advance cur_tx so that it references a fresh descriptor.
3127 * ndo_start_xmit will fill this descriptor the next time it's
3128 * called and stmmac_tx_clean may clean up to this descriptor.
3130 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3131 tx_q->cur_tx = entry;
3133 if (netif_msg_pktdata(priv)) {
3134 void *tx_head;
3136 netdev_dbg(priv->dev,
3137 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3138 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3139 entry, first, nfrags);
3141 if (priv->extend_desc)
3142 tx_head = (void *)tx_q->dma_etx;
3143 else
3144 tx_head = (void *)tx_q->dma_tx;
3146 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3148 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3149 print_pkt(skb->data, skb->len);
3152 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3153 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3154 __func__);
3155 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3158 dev->stats.tx_bytes += skb->len;
3160 /* According to the coalesce parameter the IC bit for the latest
3161 * segment is reset and the timer re-started to clean the tx status.
3162 * This approach takes care about the fragments: desc is the first
3163 * element in case of no SG.
3165 priv->tx_count_frames += nfrags + 1;
3166 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3167 mod_timer(&priv->txtimer,
3168 STMMAC_COAL_TIMER(priv->tx_coal_timer));
3169 } else {
3170 priv->tx_count_frames = 0;
3171 priv->hw->desc->set_tx_ic(desc);
3172 priv->xstats.tx_set_ic_bit++;
3175 skb_tx_timestamp(skb);
3177 /* Ready to fill the first descriptor and set the OWN bit w/o any
3178 * problems because all the descriptors are actually ready to be
3179 * passed to the DMA engine.
3181 if (likely(!is_jumbo)) {
3182 bool last_segment = (nfrags == 0);
3184 des = dma_map_single(priv->device, skb->data,
3185 nopaged_len, DMA_TO_DEVICE);
3186 if (dma_mapping_error(priv->device, des))
3187 goto dma_map_err;
3189 tx_q->tx_skbuff_dma[first_entry].buf = des;
3190 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3191 first->des0 = cpu_to_le32(des);
3192 else
3193 first->des2 = cpu_to_le32(des);
3195 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3196 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3198 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3199 priv->hwts_tx_en)) {
3200 /* declare that device is doing timestamping */
3201 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3202 priv->hw->desc->enable_tx_timestamp(first);
3205 /* Prepare the first descriptor setting the OWN bit too */
3206 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3207 csum_insertion, priv->mode, 1,
3208 last_segment, skb->len);
3210 /* The own bit must be the latest setting done when prepare the
3211 * descriptor and then barrier is needed to make sure that
3212 * all is coherent before granting the DMA engine.
3214 dma_wmb();
3217 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3219 if (priv->synopsys_id < DWMAC_CORE_4_00)
3220 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3221 else
3222 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3223 queue);
3225 return NETDEV_TX_OK;
3227 dma_map_err:
3228 netdev_err(priv->dev, "Tx DMA map failed\n");
3229 dev_kfree_skb(skb);
3230 priv->dev->stats.tx_dropped++;
3231 return NETDEV_TX_OK;
3234 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3236 struct ethhdr *ehdr;
3237 u16 vlanid;
3239 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3240 NETIF_F_HW_VLAN_CTAG_RX &&
3241 !__vlan_get_tag(skb, &vlanid)) {
3242 /* pop the vlan tag */
3243 ehdr = (struct ethhdr *)skb->data;
3244 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3245 skb_pull(skb, VLAN_HLEN);
3246 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
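/* Helper for stmmac_rx(): once rx_zeroc_thresh reaches STMMAC_RX_THRESH
 * (it is set to that value when skb allocation fails in
 * stmmac_rx_refill()), the receive path prefers copying frames into a new
 * skb instead of handing out the pre-mapped zero-copy buffers.
 */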
3251 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3253 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3254 return 0;
3256 return 1;
3260 * stmmac_rx_refill - refill used skb preallocated buffers
3261 * @priv: driver private structure
3262 * @queue: RX queue index
3263 * Description : this reallocates the skb for the zero-copy based
3264 * reception process.
3266 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3268 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3269 int dirty = stmmac_rx_dirty(priv, queue);
3270 unsigned int entry = rx_q->dirty_rx;
3272 int bfsize = priv->dma_buf_sz;
3274 while (dirty-- > 0) {
3275 struct dma_desc *p;
3277 if (priv->extend_desc)
3278 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3279 else
3280 p = rx_q->dma_rx + entry;
3282 if (likely(!rx_q->rx_skbuff[entry])) {
3283 struct sk_buff *skb;
3285 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3286 if (unlikely(!skb)) {
3287 /* so for a while no zero-copy! */
3288 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3289 if (unlikely(net_ratelimit()))
3290 dev_err(priv->device,
3291 "fail to alloc skb entry %d\n",
3292 entry);
3293 break;
3296 rx_q->rx_skbuff[entry] = skb;
3297 rx_q->rx_skbuff_dma[entry] =
3298 dma_map_single(priv->device, skb->data, bfsize,
3299 DMA_FROM_DEVICE);
3300 if (dma_mapping_error(priv->device,
3301 rx_q->rx_skbuff_dma[entry])) {
3302 netdev_err(priv->dev, "Rx DMA map failed\n");
3303 dev_kfree_skb(skb);
3304 break;
3307 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3308 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3309 p->des1 = 0;
3310 } else {
3311 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3313 if (priv->hw->mode->refill_desc3)
3314 priv->hw->mode->refill_desc3(rx_q, p);
3316 if (rx_q->rx_zeroc_thresh > 0)
3317 rx_q->rx_zeroc_thresh--;
3319 netif_dbg(priv, rx_status, priv->dev,
3320 "refill entry #%d\n", entry);
3322 dma_wmb();
3324 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3325 priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3326 else
3327 priv->hw->desc->set_rx_owner(p);
3329 dma_wmb();
3331 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3333 rx_q->dirty_rx = entry;
3337 * stmmac_rx - manage the receive process
3338 * @priv: driver private structure
3339 * @limit: napi budget
3340 * @queue: RX queue index.
3341 * Description : this is the function called by the napi poll method.
3342 * It gets all the frames inside the ring.
3344 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3346 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3347 unsigned int entry = rx_q->cur_rx;
3348 int coe = priv->hw->rx_csum;
3349 unsigned int next_entry;
3350 unsigned int count = 0;
3352 if (netif_msg_rx_status(priv)) {
3353 void *rx_head;
3355 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3356 if (priv->extend_desc)
3357 rx_head = (void *)rx_q->dma_erx;
3358 else
3359 rx_head = (void *)rx_q->dma_rx;
3361 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3363 while (count < limit) {
3364 int status;
3365 struct dma_desc *p;
3366 struct dma_desc *np;
3368 if (priv->extend_desc)
3369 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3370 else
3371 p = rx_q->dma_rx + entry;
3373 /* read the status of the incoming frame */
3374 status = priv->hw->desc->rx_status(&priv->dev->stats,
3375 &priv->xstats, p);
3376 /* check if managed by the DMA otherwise go ahead */
3377 if (unlikely(status & dma_own))
3378 break;
3380 count++;
3382 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3383 next_entry = rx_q->cur_rx;
3385 if (priv->extend_desc)
3386 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3387 else
3388 np = rx_q->dma_rx + next_entry;
3390 prefetch(np);
3392 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3393 priv->hw->desc->rx_extended_status(&priv->dev->stats,
3394 &priv->xstats,
3395 rx_q->dma_erx +
3396 entry);
3397 if (unlikely(status == discard_frame)) {
3398 priv->dev->stats.rx_errors++;
3399 if (priv->hwts_rx_en && !priv->extend_desc) {
3400 /* DESC2 & DESC3 will be overwritten by device
3401 * with timestamp value, hence reinitialize
3402 * them in stmmac_rx_refill() function so that
3403 * device can reuse it.
3405 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3406 rx_q->rx_skbuff[entry] = NULL;
3407 dma_unmap_single(priv->device,
3408 rx_q->rx_skbuff_dma[entry],
3409 priv->dma_buf_sz,
3410 DMA_FROM_DEVICE);
3412 } else {
3413 struct sk_buff *skb;
3414 int frame_len;
3415 unsigned int des;
3417 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3418 des = le32_to_cpu(p->des0);
3419 else
3420 des = le32_to_cpu(p->des2);
3422 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3424 /* If frame length is greater than skb buffer size
3425 * (preallocated during init) then the packet is
3426 * ignored
3428 if (frame_len > priv->dma_buf_sz) {
3429 netdev_err(priv->dev,
3430 "len %d larger than size (%d)\n",
3431 frame_len, priv->dma_buf_sz);
3432 priv->dev->stats.rx_length_errors++;
3433 break;
3436 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3437 * Type frames (LLC/LLC-SNAP)
3439 if (unlikely(status != llc_snap))
3440 frame_len -= ETH_FCS_LEN;
3442 if (netif_msg_rx_status(priv)) {
3443 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3444 p, entry, des);
3445 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3446 frame_len, status);
3449 /* Zero-copy is always used for all sizes
3450 * in case of GMAC4 because it always needs
3451 * to refill the used descriptors.
3453 if (unlikely(!priv->plat->has_gmac4 &&
3454 ((frame_len < priv->rx_copybreak) ||
3455 stmmac_rx_threshold_count(rx_q)))) {
3456 skb = netdev_alloc_skb_ip_align(priv->dev,
3457 frame_len);
3458 if (unlikely(!skb)) {
3459 if (net_ratelimit())
3460 dev_warn(priv->device,
3461 "packet dropped\n");
3462 priv->dev->stats.rx_dropped++;
3463 break;
3466 dma_sync_single_for_cpu(priv->device,
3467 rx_q->rx_skbuff_dma
3468 [entry], frame_len,
3469 DMA_FROM_DEVICE);
3470 skb_copy_to_linear_data(skb,
3471 rx_q->
3472 rx_skbuff[entry]->data,
3473 frame_len);
3475 skb_put(skb, frame_len);
3476 dma_sync_single_for_device(priv->device,
3477 rx_q->rx_skbuff_dma
3478 [entry], frame_len,
3479 DMA_FROM_DEVICE);
3480 } else {
3481 skb = rx_q->rx_skbuff[entry];
3482 if (unlikely(!skb)) {
3483 netdev_err(priv->dev,
3484 "%s: Inconsistent Rx chain\n",
3485 priv->dev->name);
3486 priv->dev->stats.rx_dropped++;
3487 break;
3489 prefetch(skb->data - NET_IP_ALIGN);
3490 rx_q->rx_skbuff[entry] = NULL;
3491 rx_q->rx_zeroc_thresh++;
3493 skb_put(skb, frame_len);
3494 dma_unmap_single(priv->device,
3495 rx_q->rx_skbuff_dma[entry],
3496 priv->dma_buf_sz,
3497 DMA_FROM_DEVICE);
3500 if (netif_msg_pktdata(priv)) {
3501 netdev_dbg(priv->dev, "frame received (%dbytes)",
3502 frame_len);
3503 print_pkt(skb->data, frame_len);
3506 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3508 stmmac_rx_vlan(priv->dev, skb);
3510 skb->protocol = eth_type_trans(skb, priv->dev);
3512 if (unlikely(!coe))
3513 skb_checksum_none_assert(skb);
3514 else
3515 skb->ip_summed = CHECKSUM_UNNECESSARY;
3517 napi_gro_receive(&rx_q->napi, skb);
3519 priv->dev->stats.rx_packets++;
3520 priv->dev->stats.rx_bytes += frame_len;
3522 entry = next_entry;
3525 stmmac_rx_refill(priv, queue);
3527 priv->xstats.rx_pkt_n += count;
3529 return count;
3533 * stmmac_poll - stmmac poll method (NAPI)
3534 * @napi : pointer to the napi structure.
3535 * @budget : maximum number of packets that the current CPU can receive from
3536 * all interfaces.
3537 * Description :
3538 * To look at the incoming frames and clear the tx resources.
3540 static int stmmac_poll(struct napi_struct *napi, int budget)
3542 struct stmmac_rx_queue *rx_q =
3543 container_of(napi, struct stmmac_rx_queue, napi);
3544 struct stmmac_priv *priv = rx_q->priv_data;
3545 u32 tx_count = priv->plat->tx_queues_to_use;
3546 u32 chan = rx_q->queue_index;
3547 int work_done = 0;
3548 u32 queue;
3550 priv->xstats.napi_poll++;
3552 /* check all the queues */
3553 for (queue = 0; queue < tx_count; queue++)
3554 stmmac_tx_clean(priv, queue);
3556 work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3557 if (work_done < budget) {
3558 napi_complete_done(napi, work_done);
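/* All pending work fitted in the budget, so re-enable the per-channel DMA
 * interrupt that was masked in stmmac_dma_interrupt().
 */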
3559 stmmac_enable_dma_irq(priv, chan);
3561 return work_done;
3565 * stmmac_tx_timeout
3566 * @dev : Pointer to net device structure
3567 * Description: this function is called when a packet transmission fails to
3568 * complete within a reasonable time. The driver will mark the error in the
3569 * netdev structure and arrange for the device to be reset to a sane state
3570 * in order to transmit a new packet.
3572 static void stmmac_tx_timeout(struct net_device *dev)
3574 struct stmmac_priv *priv = netdev_priv(dev);
3575 u32 tx_count = priv->plat->tx_queues_to_use;
3576 u32 chan;
3578 /* Clear Tx resources and restart transmitting again */
3579 for (chan = 0; chan < tx_count; chan++)
3580 stmmac_tx_err(priv, chan);
3584 * stmmac_set_rx_mode - entry point for multicast addressing
3585 * @dev : pointer to the device structure
3586 * Description:
3587 * This function is a driver entry point which gets called by the kernel
3588 * whenever multicast addresses must be enabled/disabled.
3589 * Return value:
3590 * void.
3592 static void stmmac_set_rx_mode(struct net_device *dev)
3594 struct stmmac_priv *priv = netdev_priv(dev);
3596 priv->hw->mac->set_filter(priv->hw, dev);
3600 * stmmac_change_mtu - entry point to change MTU size for the device.
3601 * @dev : device pointer.
3602 * @new_mtu : the new MTU size for the device.
3603 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
3604 * to drive packet transmission. Ethernet has an MTU of 1500 octets
3605 * (ETH_DATA_LEN). This value can be changed with ifconfig.
3606 * Return value:
3607 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3608 * file on failure.
3610 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3612 struct stmmac_priv *priv = netdev_priv(dev);
3614 if (netif_running(dev)) {
3615 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3616 return -EBUSY;
3619 dev->mtu = new_mtu;
3621 netdev_update_features(dev);
3623 return 0;
3626 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3627 netdev_features_t features)
3629 struct stmmac_priv *priv = netdev_priv(dev);
3631 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3632 features &= ~NETIF_F_RXCSUM;
3634 if (!priv->plat->tx_coe)
3635 features &= ~NETIF_F_CSUM_MASK;
3637 /* Some GMAC devices have a bugged Jumbo frame support that
3638 * needs to have the Tx COE disabled for oversized frames
3639 * (due to limited buffer sizes). In this case we disable
3640 * the TX csum insertion in the TDES and not use SF.
3642 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3643 features &= ~NETIF_F_CSUM_MASK;
3645 /* Disable tso if asked by ethtool */
3646 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3647 if (features & NETIF_F_TSO)
3648 priv->tso = true;
3649 else
3650 priv->tso = false;
3653 return features;
3656 static int stmmac_set_features(struct net_device *netdev,
3657 netdev_features_t features)
3659 struct stmmac_priv *priv = netdev_priv(netdev);
3661 /* Keep the COE Type if checksum offload is supported */
3662 if (features & NETIF_F_RXCSUM)
3663 priv->hw->rx_csum = priv->plat->rx_coe;
3664 else
3665 priv->hw->rx_csum = 0;
3666 /* No check needed because rx_coe has been set before and it will be
3667 * fixed in case of issue.
3669 priv->hw->mac->rx_ipc(priv->hw);
3671 return 0;
3675 * stmmac_interrupt - main ISR
3676 * @irq: interrupt number.
3677 * @dev_id: to pass the net device pointer.
3678 * Description: this is the main driver interrupt service routine.
3679 * It can call:
3680 * o DMA service routine (to manage incoming frame reception and transmission
3681 * status)
3682 * o Core interrupts to manage: remote wake-up, management counter, LPI
3683 * interrupts.
3685 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3687 struct net_device *dev = (struct net_device *)dev_id;
3688 struct stmmac_priv *priv = netdev_priv(dev);
3689 u32 rx_cnt = priv->plat->rx_queues_to_use;
3690 u32 tx_cnt = priv->plat->tx_queues_to_use;
3691 u32 queues_count;
3692 u32 queue;
3694 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3696 if (priv->irq_wake)
3697 pm_wakeup_event(priv->device, 0);
3699 if (unlikely(!dev)) {
3700 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3701 return IRQ_NONE;
3704 /* To handle GMAC own interrupts */
3705 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3706 int status = priv->hw->mac->host_irq_status(priv->hw,
3707 &priv->xstats);
3709 if (unlikely(status)) {
3710 /* For LPI we need to save the tx status */
3711 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3712 priv->tx_path_in_lpi_mode = true;
3713 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3714 priv->tx_path_in_lpi_mode = false;
3717 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3718 for (queue = 0; queue < queues_count; queue++) {
3719 struct stmmac_rx_queue *rx_q =
3720 &priv->rx_queue[queue];
3722 status |=
3723 priv->hw->mac->host_mtl_irq_status(priv->hw,
3724 queue);
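/* On an MTL RX FIFO overflow the RX tail pointer is re-written so that
 * the DMA can resume fetching receive descriptors.
 */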
3726 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3727 priv->hw->dma->set_rx_tail_ptr)
3728 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3729 rx_q->rx_tail_addr,
3730 queue);
3734 /* PCS link status */
3735 if (priv->hw->pcs) {
3736 if (priv->xstats.pcs_link)
3737 netif_carrier_on(dev);
3738 else
3739 netif_carrier_off(dev);
3743 /* To handle DMA interrupts */
3744 stmmac_dma_interrupt(priv);
3746 return IRQ_HANDLED;
3749 #ifdef CONFIG_NET_POLL_CONTROLLER
3750 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3751 * to allow network I/O with interrupts disabled.
3753 static void stmmac_poll_controller(struct net_device *dev)
3755 disable_irq(dev->irq);
3756 stmmac_interrupt(dev->irq, dev);
3757 enable_irq(dev->irq);
3759 #endif
3762 * stmmac_ioctl - Entry point for the Ioctl
3763 * @dev: Device pointer.
3764 * @rq: An IOCTL specific structure, that can contain a pointer to
3765 * a proprietary structure used to pass information to the driver.
3766 * @cmd: IOCTL command
3767 * Description:
3768 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3770 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3772 int ret = -EOPNOTSUPP;
3774 if (!netif_running(dev))
3775 return -EINVAL;
3777 switch (cmd) {
3778 case SIOCGMIIPHY:
3779 case SIOCGMIIREG:
3780 case SIOCSMIIREG:
3781 if (!dev->phydev)
3782 return -EINVAL;
3783 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3784 break;
3785 case SIOCSHWTSTAMP:
3786 ret = stmmac_hwtstamp_ioctl(dev, rq);
3787 break;
3788 default:
3789 break;
3792 return ret;
3795 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3797 struct stmmac_priv *priv = netdev_priv(ndev);
3798 int ret = 0;
3800 ret = eth_mac_addr(ndev, addr);
3801 if (ret)
3802 return ret;
3804 priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
3806 return ret;
3809 #ifdef CONFIG_DEBUG_FS
3810 static struct dentry *stmmac_fs_dir;
3812 static void sysfs_display_ring(void *head, int size, int extend_desc,
3813 struct seq_file *seq)
3815 int i;
3816 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3817 struct dma_desc *p = (struct dma_desc *)head;
3819 for (i = 0; i < size; i++) {
3820 if (extend_desc) {
3821 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3822 i, (unsigned int)virt_to_phys(ep),
3823 le32_to_cpu(ep->basic.des0),
3824 le32_to_cpu(ep->basic.des1),
3825 le32_to_cpu(ep->basic.des2),
3826 le32_to_cpu(ep->basic.des3));
3827 ep++;
3828 } else {
3829 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3830 i, (unsigned int)virt_to_phys(p),
3831 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3832 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3833 p++;
3835 seq_printf(seq, "\n");
3839 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3841 struct net_device *dev = seq->private;
3842 struct stmmac_priv *priv = netdev_priv(dev);
3843 u32 rx_count = priv->plat->rx_queues_to_use;
3844 u32 tx_count = priv->plat->tx_queues_to_use;
3845 u32 queue;
3847 for (queue = 0; queue < rx_count; queue++) {
3848 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3850 seq_printf(seq, "RX Queue %d:\n", queue);
3852 if (priv->extend_desc) {
3853 seq_printf(seq, "Extended descriptor ring:\n");
3854 sysfs_display_ring((void *)rx_q->dma_erx,
3855 DMA_RX_SIZE, 1, seq);
3856 } else {
3857 seq_printf(seq, "Descriptor ring:\n");
3858 sysfs_display_ring((void *)rx_q->dma_rx,
3859 DMA_RX_SIZE, 0, seq);
3863 for (queue = 0; queue < tx_count; queue++) {
3864 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3866 seq_printf(seq, "TX Queue %d:\n", queue);
3868 if (priv->extend_desc) {
3869 seq_printf(seq, "Extended descriptor ring:\n");
3870 sysfs_display_ring((void *)tx_q->dma_etx,
3871 DMA_TX_SIZE, 1, seq);
3872 } else {
3873 seq_printf(seq, "Descriptor ring:\n");
3874 sysfs_display_ring((void *)tx_q->dma_tx,
3875 DMA_TX_SIZE, 0, seq);
3879 return 0;
3882 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3884 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3887 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3889 static const struct file_operations stmmac_rings_status_fops = {
3890 .owner = THIS_MODULE,
3891 .open = stmmac_sysfs_ring_open,
3892 .read = seq_read,
3893 .llseek = seq_lseek,
3894 .release = single_release,
3897 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3899 struct net_device *dev = seq->private;
3900 struct stmmac_priv *priv = netdev_priv(dev);
3902 if (!priv->hw_cap_support) {
3903 seq_printf(seq, "DMA HW features not supported\n");
3904 return 0;
3907 seq_printf(seq, "==============================\n");
3908 seq_printf(seq, "\tDMA HW features\n");
3909 seq_printf(seq, "==============================\n");
3911 seq_printf(seq, "\t10/100 Mbps: %s\n",
3912 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3913 seq_printf(seq, "\t1000 Mbps: %s\n",
3914 (priv->dma_cap.mbps_1000) ? "Y" : "N");
3915 seq_printf(seq, "\tHalf duplex: %s\n",
3916 (priv->dma_cap.half_duplex) ? "Y" : "N");
3917 seq_printf(seq, "\tHash Filter: %s\n",
3918 (priv->dma_cap.hash_filter) ? "Y" : "N");
3919 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3920 (priv->dma_cap.multi_addr) ? "Y" : "N");
3921 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3922 (priv->dma_cap.pcs) ? "Y" : "N");
3923 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3924 (priv->dma_cap.sma_mdio) ? "Y" : "N");
3925 seq_printf(seq, "\tPMT Remote wake up: %s\n",
3926 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3927 seq_printf(seq, "\tPMT Magic Frame: %s\n",
3928 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3929 seq_printf(seq, "\tRMON module: %s\n",
3930 (priv->dma_cap.rmon) ? "Y" : "N");
3931 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3932 (priv->dma_cap.time_stamp) ? "Y" : "N");
3933 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3934 (priv->dma_cap.atime_stamp) ? "Y" : "N");
3935 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3936 (priv->dma_cap.eee) ? "Y" : "N");
3937 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3938 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3939 (priv->dma_cap.tx_coe) ? "Y" : "N");
3940 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3941 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3942 (priv->dma_cap.rx_coe) ? "Y" : "N");
3943 } else {
3944 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3945 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3946 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3947 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3948 }
3949 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3950 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3951 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3952 priv->dma_cap.number_rx_channel);
3953 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3954 priv->dma_cap.number_tx_channel);
3955 seq_printf(seq, "\tEnhanced descriptors: %s\n",
3956 (priv->dma_cap.enh_desc) ? "Y" : "N");
3958 return 0;
3959 }
3961 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3962 {
3963 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3964 }
3966 static const struct file_operations stmmac_dma_cap_fops = {
3967 .owner = THIS_MODULE,
3968 .open = stmmac_sysfs_dma_cap_open,
3969 .read = seq_read,
3970 .llseek = seq_lseek,
3971 .release = single_release,
3972 };
3974 static int stmmac_init_fs(struct net_device *dev)
3975 {
3976 struct stmmac_priv *priv = netdev_priv(dev);
3978 /* Create per netdev entries */
3979 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3981 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3982 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3984 return -ENOMEM;
3985 }
3987 /* Entry to report DMA RX/TX rings */
3988 priv->dbgfs_rings_status =
3989 debugfs_create_file("descriptors_status", S_IRUGO,
3990 priv->dbgfs_dir, dev,
3991 &stmmac_rings_status_fops);
3993 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3994 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3995 debugfs_remove_recursive(priv->dbgfs_dir);
3997 return -ENOMEM;
3998 }
4000 /* Entry to report the DMA HW features */
4001 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
4002 priv->dbgfs_dir,
4003 dev, &stmmac_dma_cap_fops);
4005 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4006 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4007 debugfs_remove_recursive(priv->dbgfs_dir);
4009 return -ENOMEM;
4010 }
4012 return 0;
4013 }
4015 static void stmmac_exit_fs(struct net_device *dev)
4016 {
4017 struct stmmac_priv *priv = netdev_priv(dev);
4019 debugfs_remove_recursive(priv->dbgfs_dir);
4020 }
4021 #endif /* CONFIG_DEBUG_FS */
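/* net_device_ops hooks exported to the networking core: stmmac_open and
 * stmmac_release handle bring-up/teardown, stmmac_xmit is the TX path,
 * and stmmac_ioctl covers MII and hardware timestamping requests.
 */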
4023 static const struct net_device_ops stmmac_netdev_ops = {
4024 .ndo_open = stmmac_open,
4025 .ndo_start_xmit = stmmac_xmit,
4026 .ndo_stop = stmmac_release,
4027 .ndo_change_mtu = stmmac_change_mtu,
4028 .ndo_fix_features = stmmac_fix_features,
4029 .ndo_set_features = stmmac_set_features,
4030 .ndo_set_rx_mode = stmmac_set_rx_mode,
4031 .ndo_tx_timeout = stmmac_tx_timeout,
4032 .ndo_do_ioctl = stmmac_ioctl,
4033 #ifdef CONFIG_NET_POLL_CONTROLLER
4034 .ndo_poll_controller = stmmac_poll_controller,
4035 #endif
4036 .ndo_set_mac_address = stmmac_set_mac_address,
4037 };
4039 /**
4040 * stmmac_hw_init - Init the MAC device
4041 * @priv: driver private structure
4042 * Description: this function configures the MAC device according to the
4043 * platform parameters and/or the HW capability register. It prepares the
4044 * driver to use either ring or chain mode and to set up either enhanced or
4045 * normal descriptors.
4046 */
4047 static int stmmac_hw_init(struct stmmac_priv *priv)
4048 {
4049 struct mac_device_info *mac;
4051 /* Identify the MAC HW device */
4052 if (priv->plat->setup) {
4053 mac = priv->plat->setup(priv);
4054 } else if (priv->plat->has_gmac) {
4055 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4056 mac = dwmac1000_setup(priv->ioaddr,
4057 priv->plat->multicast_filter_bins,
4058 priv->plat->unicast_filter_entries,
4059 &priv->synopsys_id);
4060 } else if (priv->plat->has_gmac4) {
4061 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4062 mac = dwmac4_setup(priv->ioaddr,
4063 priv->plat->multicast_filter_bins,
4064 priv->plat->unicast_filter_entries,
4065 &priv->synopsys_id);
4066 } else {
4067 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
4068 }
4069 if (!mac)
4070 return -ENOMEM;
4072 priv->hw = mac;
4074 /* dwmac-sun8i only works in chain mode */
4075 if (priv->plat->has_sun8i)
4076 chain_mode = 1;
4078 /* To use the chained or ring mode */
4079 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4080 priv->hw->mode = &dwmac4_ring_mode_ops;
4081 } else {
4082 if (chain_mode) {
4083 priv->hw->mode = &chain_mode_ops;
4084 dev_info(priv->device, "Chain mode enabled\n");
4085 priv->mode = STMMAC_CHAIN_MODE;
4086 } else {
4087 priv->hw->mode = &ring_mode_ops;
4088 dev_info(priv->device, "Ring mode enabled\n");
4089 priv->mode = STMMAC_RING_MODE;
4090 }
4091 }
4093 /* Get the HW capability (only available on GMAC cores newer than 3.50a) */
4094 priv->hw_cap_support = stmmac_get_hw_features(priv);
4095 if (priv->hw_cap_support) {
4096 dev_info(priv->device, "DMA HW capability register supported\n");
4098 /* We can override some gmac/dma configuration fields (e.g.
4099 * enh_desc, tx_coe) that are passed through the platform
4100 * with the values from the HW capability register (if
4101 * supported).
4102 */
4103 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4104 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4105 priv->hw->pmt = priv->plat->pmt;
4107 /* TXCOE doesn't work in thresh DMA mode */
4108 if (priv->plat->force_thresh_dma_mode)
4109 priv->plat->tx_coe = 0;
4110 else
4111 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4113 /* In case of GMAC4 rx_coe is from HW cap register. */
4114 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4116 if (priv->dma_cap.rx_coe_type2)
4117 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4118 else if (priv->dma_cap.rx_coe_type1)
4119 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4121 } else {
4122 dev_info(priv->device, "No HW DMA feature register supported\n");
4123 }
4125 /* To use alternate (extended), normal or GMAC4 descriptor structures */
4126 if (priv->synopsys_id >= DWMAC_CORE_4_00)
4127 priv->hw->desc = &dwmac4_desc_ops;
4128 else
4129 stmmac_selec_desc_mode(priv);
4131 if (priv->plat->rx_coe) {
4132 priv->hw->rx_csum = priv->plat->rx_coe;
4133 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4134 if (priv->synopsys_id < DWMAC_CORE_4_00)
4135 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4136 }
4137 if (priv->plat->tx_coe)
4138 dev_info(priv->device, "TX Checksum insertion supported\n");
4140 if (priv->plat->pmt) {
4141 dev_info(priv->device, "Wake-Up On LAN supported\n");
4142 device_set_wakeup_capable(priv->device, 1);
4143 }
4145 if (priv->dma_cap.tsoen)
4146 dev_info(priv->device, "TSO supported\n");
4148 return 0;
4149 }
4151 /**
4152 * stmmac_dvr_probe
4153 * @device: device pointer
4154 * @plat_dat: platform data pointer
4155 * @res: stmmac resource pointer
4156 * Description: this is the main probe function used to
4157 * call alloc_etherdev and allocate the private structure.
4158 * Return:
4159 * 0 on success, otherwise errno.
4160 */
4161 int stmmac_dvr_probe(struct device *device,
4162 struct plat_stmmacenet_data *plat_dat,
4163 struct stmmac_resources *res)
4164 {
4165 struct net_device *ndev = NULL;
4166 struct stmmac_priv *priv;
4167 int ret = 0;
4168 u32 queue;
4170 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4171 MTL_MAX_TX_QUEUES,
4172 MTL_MAX_RX_QUEUES);
4173 if (!ndev)
4174 return -ENOMEM;
4176 SET_NETDEV_DEV(ndev, device);
4178 priv = netdev_priv(ndev);
4179 priv->device = device;
4180 priv->dev = ndev;
4182 stmmac_set_ethtool_ops(ndev);
4183 priv->pause = pause;
4184 priv->plat = plat_dat;
4185 priv->ioaddr = res->addr;
4186 priv->dev->base_addr = (unsigned long)res->addr;
4188 priv->dev->irq = res->irq;
4189 priv->wol_irq = res->wol_irq;
4190 priv->lpi_irq = res->lpi_irq;
4192 if (res->mac)
4193 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4195 dev_set_drvdata(device, priv->dev);
4197 /* Verify driver arguments */
4198 stmmac_verify_args();
4200 /* Override with kernel parameters if supplied XXX CRS XXX
4201 * this needs to have multiple instances
4202 */
4203 if ((phyaddr >= 0) && (phyaddr <= 31))
4204 priv->plat->phy_addr = phyaddr;
4206 if (priv->plat->stmmac_rst) {
4207 ret = reset_control_assert(priv->plat->stmmac_rst);
4208 reset_control_deassert(priv->plat->stmmac_rst);
4209 /* Some reset controllers have only a reset callback instead of
4210 * an assert + deassert callback pair.
4211 */
4212 if (ret == -ENOTSUPP)
4213 reset_control_reset(priv->plat->stmmac_rst);
4214 }
4216 /* Init MAC and get the capabilities */
4217 ret = stmmac_hw_init(priv);
4218 if (ret)
4219 goto error_hw_init;
4221 /* Configure real RX and TX queues */
4222 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4223 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4225 ndev->netdev_ops = &stmmac_netdev_ops;
4227 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4228 NETIF_F_RXCSUM;
4230 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4231 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4232 priv->tso = true;
4233 dev_info(priv->device, "TSO feature enabled\n");
4234 }
4235 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4236 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4237 #ifdef STMMAC_VLAN_TAG_USED
4238 /* Both mac100 and gmac support receive VLAN tag detection */
4239 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4240 #endif
4241 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4243 /* MTU range: 46 - hw-specific max */
4244 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4245 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4246 ndev->max_mtu = JUMBO_LEN;
4247 else
4248 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4249 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4250 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4251 */
4252 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4253 (priv->plat->maxmtu >= ndev->min_mtu))
4254 ndev->max_mtu = priv->plat->maxmtu;
4255 else if (priv->plat->maxmtu < ndev->min_mtu)
4256 dev_warn(priv->device,
4257 "%s: warning: maxmtu having invalid value (%d)\n",
4258 __func__, priv->plat->maxmtu);
4260 if (flow_ctrl)
4261 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4263 /* Rx Watchdog is available in the COREs newer than the 3.40.
4264 * In some cases, for example on buggy HW, this feature
4265 * has to be disabled; this can be done by passing the
4266 * riwt_off field from the platform.
4267 */
4268 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4269 priv->use_riwt = 1;
4270 dev_info(priv->device,
4271 "Enable RX Mitigation via HW Watchdog Timer\n");
4274 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4275 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4277 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4278 (8 * priv->plat->rx_queues_to_use));
4279 }
4281 spin_lock_init(&priv->lock);
4283 /* If a specific clk_csr value is passed from the platform,
4284 * this means that the CSR Clock Range selection cannot be
4285 * changed at run-time and it is fixed. Otherwise, the driver
4286 * will try to set the MDC clock dynamically according to the
4287 * actual csr clock input.
4288 */
4289 if (!priv->plat->clk_csr)
4290 stmmac_clk_csr_set(priv);
4291 else
4292 priv->clk_csr = priv->plat->clk_csr;
4294 stmmac_check_pcs_mode(priv);
4296 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4297 priv->hw->pcs != STMMAC_PCS_TBI &&
4298 priv->hw->pcs != STMMAC_PCS_RTBI) {
4299 /* MDIO bus Registration */
4300 ret = stmmac_mdio_register(ndev);
4301 if (ret < 0) {
4302 dev_err(priv->device,
4303 "%s: MDIO bus (id: %d) registration failed",
4304 __func__, priv->plat->bus_id);
4305 goto error_mdio_register;
4306 }
4307 }
4309 ret = register_netdev(ndev);
4310 if (ret) {
4311 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4312 __func__, ret);
4313 goto error_netdev_register;
4314 }
4316 return ret;
4318 error_netdev_register:
4319 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4320 priv->hw->pcs != STMMAC_PCS_TBI &&
4321 priv->hw->pcs != STMMAC_PCS_RTBI)
4322 stmmac_mdio_unregister(ndev);
4323 error_mdio_register:
4324 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4325 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4327 netif_napi_del(&rx_q->napi);
4328 }
4329 error_hw_init:
4330 free_netdev(ndev);
4332 return ret;
4333 }
4334 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4336 /**
4337 * stmmac_dvr_remove
4338 * @dev: device pointer
4339 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4340 * changes the link status and releases the DMA descriptor rings.
4341 */
4342 int stmmac_dvr_remove(struct device *dev)
4343 {
4344 struct net_device *ndev = dev_get_drvdata(dev);
4345 struct stmmac_priv *priv = netdev_priv(ndev);
4347 netdev_info(priv->dev, "%s: removing driver", __func__);
4349 stmmac_stop_all_dma(priv);
4351 priv->hw->mac->set_mac(priv->ioaddr, false);
4352 netif_carrier_off(ndev);
4353 unregister_netdev(ndev);
4354 if (priv->plat->stmmac_rst)
4355 reset_control_assert(priv->plat->stmmac_rst);
4356 clk_disable_unprepare(priv->plat->pclk);
4357 clk_disable_unprepare(priv->plat->stmmac_clk);
4358 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4359 priv->hw->pcs != STMMAC_PCS_TBI &&
4360 priv->hw->pcs != STMMAC_PCS_RTBI)
4361 stmmac_mdio_unregister(ndev);
4362 free_netdev(ndev);
4364 return 0;
4365 }
4366 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4368 /**
4369 * stmmac_suspend - suspend callback
4370 * @dev: device pointer
4371 * Description: this is the function to suspend the device and it is called
4372 * by the platform driver to stop the network queue, release the resources,
4373 * program the PMT register (for WoL) and clean up driver resources.
4374 */
4375 int stmmac_suspend(struct device *dev)
4376 {
4377 struct net_device *ndev = dev_get_drvdata(dev);
4378 struct stmmac_priv *priv = netdev_priv(ndev);
4379 unsigned long flags;
4381 if (!ndev || !netif_running(ndev))
4382 return 0;
4384 if (ndev->phydev)
4385 phy_stop(ndev->phydev);
4387 spin_lock_irqsave(&priv->lock, flags);
4389 netif_device_detach(ndev);
4390 stmmac_stop_all_queues(priv);
4392 stmmac_disable_all_queues(priv);
4394 /* Stop TX/RX DMA */
4395 stmmac_stop_all_dma(priv);
4397 /* Enable Power down mode by programming the PMT regs */
4398 if (device_may_wakeup(priv->device)) {
4399 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4400 priv->irq_wake = 1;
4401 } else {
4402 priv->hw->mac->set_mac(priv->ioaddr, false);
4403 pinctrl_pm_select_sleep_state(priv->device);
4404 /* Disable the clocks since PMT wake-up is off */
4405 clk_disable(priv->plat->pclk);
4406 clk_disable(priv->plat->stmmac_clk);
4407 }
4408 spin_unlock_irqrestore(&priv->lock, flags);
4410 priv->oldlink = false;
4411 priv->speed = SPEED_UNKNOWN;
4412 priv->oldduplex = DUPLEX_UNKNOWN;
4413 return 0;
4414 }
4415 EXPORT_SYMBOL_GPL(stmmac_suspend);
4417 /**
4418 * stmmac_reset_queues_param - reset queue parameters
4419 * @priv: driver private structure
4420 */
4421 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4422 {
4423 u32 rx_cnt = priv->plat->rx_queues_to_use;
4424 u32 tx_cnt = priv->plat->tx_queues_to_use;
4425 u32 queue;
4427 for (queue = 0; queue < rx_cnt; queue++) {
4428 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4430 rx_q->cur_rx = 0;
4431 rx_q->dirty_rx = 0;
4432 }
4434 for (queue = 0; queue < tx_cnt; queue++) {
4435 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4437 tx_q->cur_tx = 0;
4438 tx_q->dirty_tx = 0;
4439 }
4440 }
4442 /**
4443 * stmmac_resume - resume callback
4444 * @dev: device pointer
4445 * Description: on resume this function is invoked to set up the DMA and CORE
4446 * in a usable state.
4447 */
4448 int stmmac_resume(struct device *dev)
4449 {
4450 struct net_device *ndev = dev_get_drvdata(dev);
4451 struct stmmac_priv *priv = netdev_priv(ndev);
4452 unsigned long flags;
4454 if (!netif_running(ndev))
4455 return 0;
4457 /* The Power Down bit in the PMT register is cleared
4458 * automatically as soon as a magic packet or a Wake-up frame
4459 * is received. Nevertheless, it's better to manually clear
4460 * this bit because it can generate problems while resuming
4461 * from other devices (e.g. serial console).
4462 */
4463 if (device_may_wakeup(priv->device)) {
4464 spin_lock_irqsave(&priv->lock, flags);
4465 priv->hw->mac->pmt(priv->hw, 0);
4466 spin_unlock_irqrestore(&priv->lock, flags);
4467 priv->irq_wake = 0;
4468 } else {
4469 pinctrl_pm_select_default_state(priv->device);
4470 /* enable the clk previously disabled */
4471 clk_enable(priv->plat->stmmac_clk);
4472 clk_enable(priv->plat->pclk);
4473 /* reset the phy so that it's ready */
4474 if (priv->mii)
4475 stmmac_mdio_reset(priv->mii);
4476 }
4478 netif_device_attach(ndev);
4480 spin_lock_irqsave(&priv->lock, flags);
4482 stmmac_reset_queues_param(priv);
4484 /* reset private mss value to force mss context settings at
4485 * next tso xmit (only used for gmac4).
4486 */
4487 priv->mss = 0;
4489 stmmac_clear_descriptors(priv);
4491 stmmac_hw_setup(ndev, false);
4492 stmmac_init_tx_coalesce(priv);
4493 stmmac_set_rx_mode(ndev);
4495 stmmac_enable_all_queues(priv);
4497 stmmac_start_all_queues(priv);
4499 spin_unlock_irqrestore(&priv->lock, flags);
4501 if (ndev->phydev)
4502 phy_start(ndev->phydev);
4504 return 0;
4505 }
4506 EXPORT_SYMBOL_GPL(stmmac_resume);
4508 #ifndef MODULE
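/* Parser for the built-in kernel command line, registered via
 * __setup("stmmaceth=") below.  Options are comma-separated "name:value"
 * pairs, e.g. stmmaceth=debug:16,eee_timer:500 (illustrative values).
 */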
4509 static int __init stmmac_cmdline_opt(char *str)
4510 {
4511 char *opt;
4513 if (!str || !*str)
4514 return -EINVAL;
4515 while ((opt = strsep(&str, ",")) != NULL) {
4516 if (!strncmp(opt, "debug:", 6)) {
4517 if (kstrtoint(opt + 6, 0, &debug))
4518 goto err;
4519 } else if (!strncmp(opt, "phyaddr:", 8)) {
4520 if (kstrtoint(opt + 8, 0, &phyaddr))
4521 goto err;
4522 } else if (!strncmp(opt, "buf_sz:", 7)) {
4523 if (kstrtoint(opt + 7, 0, &buf_sz))
4524 goto err;
4525 } else if (!strncmp(opt, "tc:", 3)) {
4526 if (kstrtoint(opt + 3, 0, &tc))
4527 goto err;
4528 } else if (!strncmp(opt, "watchdog:", 9)) {
4529 if (kstrtoint(opt + 9, 0, &watchdog))
4530 goto err;
4531 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4532 if (kstrtoint(opt + 10, 0, &flow_ctrl))
4533 goto err;
4534 } else if (!strncmp(opt, "pause:", 6)) {
4535 if (kstrtoint(opt + 6, 0, &pause))
4536 goto err;
4537 } else if (!strncmp(opt, "eee_timer:", 10)) {
4538 if (kstrtoint(opt + 10, 0, &eee_timer))
4539 goto err;
4540 } else if (!strncmp(opt, "chain_mode:", 11)) {
4541 if (kstrtoint(opt + 11, 0, &chain_mode))
4542 goto err;
4543 }
4544 }
4545 return 0;
4547 err:
4548 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4549 return -EINVAL;
4550 }
4552 __setup("stmmaceth=", stmmac_cmdline_opt);
4553 #endif /* MODULE */
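/* Module-level init/exit: create and remove the shared debugfs root used
 * by all stmmac devices (per-device entries are added in stmmac_init_fs()).
 */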
4555 static int __init stmmac_init(void)
4556 {
4557 #ifdef CONFIG_DEBUG_FS
4558 /* Create debugfs main directory if it doesn't exist yet */
4559 if (!stmmac_fs_dir) {
4560 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4562 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4563 pr_err("ERROR %s, debugfs create directory failed\n",
4564 STMMAC_RESOURCE_NAME);
4566 return -ENOMEM;
4567 }
4568 }
4569 #endif
4571 return 0;
4572 }
4574 static void __exit stmmac_exit(void)
4575 {
4576 #ifdef CONFIG_DEBUG_FS
4577 debugfs_remove_recursive(stmmac_fs_dir);
4578 #endif
4579 }
4581 module_init(stmmac_init)
4582 module_exit(stmmac_exit)
4584 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4585 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4586 MODULE_LICENSE("GPL");