/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
#include <soc/imx/cpuidle.h>

#include <asm/cacheflush.h>

#include "fec.h"
static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME	"fec"

#define FEC_ENET_GET_QUEUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
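
/* Editor's note on FEC_ENET_GET_QUEUE above (spelling fixed from the
 * original FEC_ENET_GET_QUQUE): on the AVB-capable parts the three rings
 * are serviced in priority order.  fec_enet_collect_events() below records
 * ring 1 (class A) in work bit 0, ring 2 (class B) in bit 1 and ring 0
 * (best effort) in bit 2, so iterating the work bits in ascending order
 * and mapping the bit index back through this macro (0 -> 1, 1 -> 2,
 * 2 -> 0) visits class A first and best effort last.
 */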
static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
	}, {
		.name = "imx27-fec",
		.driver_data = FEC_QUIRK_MIB_CLEAR,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
				FEC_QUIRK_HAS_RACC,
	}, {
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
				FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
				FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
	}, {
		.name = "imx6ul-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
				FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
				FEC_QUIRK_HAS_COALESCE,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);
enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
	IMX6SX_FEC,
	IMX6UL_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1522
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1536

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN		(1 << 2)
#define FEC_ECR_SLEEP		(1 << 3)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

#define COPYBREAK_DEFAULT	256

#define TSO_HEADER_SIZE		128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
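
/* Illustrative example of the MMFR bit layout above (editor's addition,
 * values not from the original source): a read of register 2 on the PHY
 * at address 1 composes FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(1) |
 * FEC_MMFR_RA(2) | FEC_MMFR_TA, i.e. 0x40000000 | 0x20000000 |
 * 0x00800000 | 0x00080000 | 0x00020000 = 0x608a0000.
 * fec_enet_mdio_read() below writes that word to FEC_MII_DATA and the
 * PHY's reply is then taken from the low 16 bits via FEC_MMFR_DATA().
 */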
static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
}

static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}

static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}
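
/* Worked example for the ring arithmetic above (illustrative values,
 * editor's addition): with ring_size = 512 and dsize_log2 = 3 (8-byte
 * legacy descriptors), if bd.cur has advanced 10 descriptors past
 * dirty_tx the pointer difference is -80 bytes, so
 * entries = (-80 >> 3) - 1 = -11, and the wrap-around correction returns
 * -11 + 512 = 501 free descriptors.  The extra -1 keeps one descriptor
 * in hand so the completely-full and completely-empty ring states stay
 * distinguishable.
 */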
static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}

static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	int i;
	unsigned int *src = src_buf;
	unsigned int *dst = dst_buf;

	for (i = 0; i < len; i += 4, src++, dst++)
		*dst = swab32p(src);
}
static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->bd.base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
			index,
			bdp == txq->bd.cur ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_skbuff[index]);
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
}
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		ebdp = (struct bufdesc_ex *)bdp;

		status = fec16_to_cpu(bdp->cbd_sc);
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = cpu_to_fec32(estatus);
		}

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
	}

	return bdp;
dma_mapping_error:
	bdp = txq->bd.cur;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
	}
	return ERR_PTR(-ENOMEM);
}
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	last_bdp = bdp;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(bdp, &txq->bd);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp and tx_skbuff are performed before
	 * txq->bd.cur.
	 */
	wmb();
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}
static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_BUSY;
	}

	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}
static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
	struct bufdesc *bdp = txq->bd.cur;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}
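
/* Editor's note on the transmit trigger in fec_enet_txq_submit_tso()
 * above: the four back-to-back reads of reg_desc_active relate to the
 * FEC_QUIRK_ERR007885 workaround -- on the affected SoCs a TDAR write can
 * be lost while the uDMA engine is going idle, so the register is sampled
 * several times and TDAR is only re-armed when it reads back as zero.
 * (This is the editor's summary of the quirk flag's intent, not wording
 * from the original source.)
 */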
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}
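
/* Editor's note on the queue stop above: the queue is stopped once the
 * free-descriptor count drops to tx_stop_threshold and is only woken in
 * fec_enet_tx_queue() once it has recovered to tx_wake_threshold; the
 * gap between the two thresholds provides hysteresis so a queue hovering
 * around the limit does not bounce between stopped and running on every
 * packet.
 */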
/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}
static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}

static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}

static void fec_enet_reset_skb(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	int i, j;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

		for (j = 0; j < txq->bd.ring_size; j++) {
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
			}
		}
	}
}
/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC.  The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset.  We should wait for this.
	 * On the i.MX6SX SOC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
	writel((__force u32)cpu_to_be32(temp_mac[0]),
	       fep->hwp + FEC_ADDR_LOW);
	writel((__force u32)cpu_to_be32(temp_mac[1]),
	       fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		val = readl(fep->hwp + FEC_RACC);
		/* align IP header */
		val |= FEC_RACC_SHIFT16;
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
			/* set RX checksum */
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
	}
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (ndev->phydev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;

			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame*/
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     ndev->phydev && ndev->phydev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);
}
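
/* Editor's note on the MIB enable in fec_restart() above: the counters
 * are controlled by FEC_MIB_CTRLSTAT_DISABLE (bit 31, defined near the
 * top of this file), so writing 0 << 31 -- i.e. clearing the disable
 * bit -- is what switches the statistic event counters on.
 */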
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset.  We should wait for this.
	 * On the i.MX6SX SOC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(1, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	} else {
		writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);

		if (pdata && pdata->sleep_mode_enable)
			pdata->sleep_mode_enable(true);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}
static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}
static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
	struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}
static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
	struct	fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int	index = 0;
	int	entries_free;

	fep = netdev_priv(ndev);

	queue_id = FEC_ENET_GET_QUEUE(queue_id);

	txq = fep->tx_queue[queue_id];
	/* get next bdp of dirty_tx */
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		if (status & BD_ENET_TX_READY)
			break;

		index = fec_enet_get_bd_index(bdp, &txq->bd);

		skb = txq->tx_skbuff[index];
		txq->tx_skbuff[index] = NULL;
		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 fec16_to_cpu(bdp->cbd_datlen),
					 DMA_TO_DEVICE);
		bdp->cbd_bufaddr = cpu_to_fec32(0);
		if (!skb)
			goto skb_done;

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
skb_done:
		/* Make sure the update to bdp and tx_skbuff are performed
		 * before dirty_tx
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_queue_stopped(ndev)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006358: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
}
static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u16 queue_id;

	/* First process class A queue, then Class B and Best Effort queue */
	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
		clear_bit(queue_id, &fep->work_tx);
		fec_enet_tx_queue(ndev, queue_id);
	}
}
static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int off;

	off = ((unsigned long)skb->data) & fep->rx_align;
	if (off)
		skb_reserve(skb, fep->rx_align + 1 - off);

	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");
		return -ENOMEM;
	}

	return 0;
}
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *new_skb;

	if (length > fep->rx_copybreak)
		return false;

	new_skb = netdev_alloc_skb(ndev, length);
	if (!new_skb)
		return false;

	dma_sync_single_for_cpu(&fep->pdev->dev,
				fec32_to_cpu(bdp->cbd_bufaddr),
				FEC_ENET_RX_FRSIZE - fep->rx_align,
				DMA_FROM_DEVICE);
	if (!swap)
		memcpy(new_skb->data, (*skb)->data, length);
	else
		swap_buffer2(new_skb->data, (*skb)->data, length);
	*skb = new_skb;

	return true;
}
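
/* Editor's note on the copybreak path above: frames no longer than
 * rx_copybreak (COPYBREAK_DEFAULT is 256 bytes, defined near the top of
 * this file) are memcpy'd into a right-sized skb so the original large
 * receive buffer can be handed straight back to the hardware with only a
 * dma_sync, avoiding an unmap/remap cycle for small packets.
 */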
/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct  sk_buff *skb_new = NULL;
	struct  sk_buff *skb;
	ushort	pkt_len;
	__u8 *data;
	int	pkt_received = 0;
	struct	bufdesc_ex *ebdp = NULL;
	bool	vlan_packet_rcvd = false;
	u16	vlan_tag;
	int	index = 0;
	bool	is_copybreak;
	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	queue_id = FEC_ENET_GET_QUEUE(queue_id);
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->bd.cur;

	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

		/* Check for errors. */
		status ^= BD_ENET_RX_LAST;
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			   BD_ENET_RX_CL)) {
			ndev->stats.rx_errors++;
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
						| BD_ENET_RX_LAST)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(bdp, &rxq->bd);
		skb = rxq->rx_skbuff[index];

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
						  need_swap);
		if (!is_copybreak) {
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			}
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 FEC_ENET_RX_FRSIZE - fep->rx_align,
					 DMA_FROM_DEVICE);
		}

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);
		data = skb->data;

		if (!is_copybreak && need_swap)
			swap_buffer(data, pkt_len);

#if !defined(CONFIG_M5272)
		if (fep->quirks & FEC_QUIRK_HAS_RACC)
			data = skb_pull_inline(skb, 2);
#endif

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		napi_gro_receive(&fep->napi, skb);

		if (is_copybreak) {
			dma_sync_single_for_device(&fep->pdev->dev,
						   fec32_to_cpu(bdp->cbd_bufaddr),
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
						   DMA_FROM_DEVICE);
		} else {
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);
		}

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, rxq->bd.reg_desc_active);
	}
	rxq->bd.cur = bdp;
	return pkt_received;
}
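
/* Editor's note on the two-byte pull in fec_enet_rx_queue() above: when
 * FEC_QUIRK_HAS_RACC is set, fec_restart() programs FEC_RACC_SHIFT16 so
 * the hardware prepends two padding bytes to every received frame; the
 * skb_pull_inline(skb, 2) discards that padding again, leaving the IP
 * header aligned on a four-byte boundary.
 */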
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	int	pkt_received = 0;
	u16	queue_id;
	struct fec_enet_private *fep = netdev_priv(ndev);

	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		int ret;

		ret = fec_enet_rx_queue(ndev,
					budget - pkt_received, queue_id);

		if (ret < budget - pkt_received)
			clear_bit(queue_id, &fep->work_rx);

		pkt_received += ret;
	}
	return pkt_received;
}
static void
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
	if (int_events == 0)
		return;

	if (int_events & FEC_ENET_RXF)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
	if (int_events & FEC_ENET_TXF_2)
		fep->work_tx |= (1 << 1);
}
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	int_events = readl(fep->hwp + FEC_IEVENT);
	writel(int_events, fep->hwp + FEC_IEVENT);
	fec_enet_collect_events(fep, int_events);

	if ((fep->work_tx || fep->work_rx) && fep->link) {
		ret = IRQ_HANDLED;

		if (napi_schedule_prep(&fep->napi)) {
			/* Disable the NAPI interrupts */
			writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);
		}
	}

	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}

	if (fep->ptp_clock)
		fec_ptp_check_pps_event(fep);

	return ret;
}
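
/* Editor's note on the interrupt/NAPI handshake above: once
 * napi_schedule_prep() succeeds the handler narrows the interrupt mask
 * to FEC_NAPI_IMASK, so RX/TX events stay silenced while the poll loop
 * runs; fec_enet_rx_napi() below restores FEC_DEFAULT_IMASK only after
 * the poll has completed within budget.
 */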
static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int pkts;

	pkts = fec_enet_rx(ndev, budget);

	fec_enet_tx(ndev);

	if (pkts < budget) {
		napi_complete_done(napi, pkts);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}
/* ------------------------------------------------------------------------- */
static void fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		netdev_info(ndev, "Using random MAC address: %pM\n",
			    ndev->dev_addr);
		return;
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int status_change = 0;

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		return;
	}

	/*
	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
		fep->link = 0;
	} else if (phy_dev->link) {
		if (!fep->link) {
			fep->link = phy_dev->link;
			status_change = 1;
		}

		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;
			status_change = 1;
		}

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
		if (status_change) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_restart(ndev);
			netif_wake_queue(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
		}
	} else {
		if (fep->link) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_stop(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	if (status_change)
		phy_print_status(phy_dev);
}
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret = 0;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	fep->mii_timeout = 0;
	reinit_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO read timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			       u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;
	else
		ret = 0;

	fep->mii_timeout = 0;
	reinit_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO write timeout\n");
		ret = -ETIMEDOUT;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
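
/* Editor's note on the MDIO wait above: both accessors sleep on
 * fep->mdio_done, and fec_enet_interrupt() completes it when the
 * FEC_ENET_MII event fires, so the bus never busy-polls FEC_MII_DATA;
 * a missing interrupt surfaces as the -ETIMEDOUT path instead.
 */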
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_ahb);
		if (ret)
			return ret;

		ret = clk_prepare_enable(fep->clk_enet_out);
		if (ret)
			goto failed_clk_enet_out;

		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret) {
				mutex_unlock(&fep->ptp_clk_mutex);
				goto failed_clk_ptp;
			} else {
				fep->ptp_clk_on = true;
			}
			mutex_unlock(&fep->ptp_clk_mutex);
		}

		ret = clk_prepare_enable(fep->clk_ref);
		if (ret)
			goto failed_clk_ref;
	} else {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			clk_disable_unprepare(fep->clk_ptp);
			fep->ptp_clk_on = false;
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		clk_disable_unprepare(fep->clk_ref);
	}

	return 0;

failed_clk_ref:
	clk_disable_unprepare(fep->clk_ref);
failed_clk_ptp:
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
	clk_disable_unprepare(fep->clk_ahb);

	return ret;
}
static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(ndev, fep->phy_node,
					 &fec_enet_adjust_link, 0,
					 fep->phy_interface);
		if (!phy_dev) {
			netdev_err(ndev, "Unable to connect to phy\n");
			return -ENODEV;
		}
	} else {
		/* check for attached phy */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
				continue;
			if (dev_id--)
				continue;
			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
			break;
		}

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
			phy_id = 0;
		}

		snprintf(phy_name, sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);
	}

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	} else {
		phy_dev->supported &= PHY_BASIC_FEATURES;
	}

	phy_dev->advertising = phy_dev->supported;

	fep->link = 0;
	fep->full_duplex = 0;

	phy_attached_info(phy_dev);

	return 0;
}
static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *node;
	int err = -ENXIO;
	u32 mii_speed, holdtime;

	/*
	 * The i.MX28 dual fec interfaces are not equal.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document.
	 */
	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
		mii_speed--;
	if (mii_speed > 63) {
		dev_err(&pdev->dev,
			"fec clock (%lu) too fast to get right mii speed\n",
			clk_get_rate(fep->clk_ipg));
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register always.
	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
	 */
	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;

	fep->phy_speed = mii_speed << 1 | holdtime << 8;

	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		pdev->name, fep->dev_id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
	if (node) {
		err = of_mdiobus_register(fep->mii_bus, node);
		of_node_put(node);
	} else {
		err = mdiobus_register(fep->mii_bus);
	}

	if (err)
		goto err_out_free_mdiobus;

	mii_cnt++;

	/* save fec0 mii_bus */
	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
		fec0_mii_bus = fep->mii_bus;

	return 0;

err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}
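
/* Worked example for the MSCR maths above (illustrative clock rate,
 * editor's addition): with clk_ipg at 66 MHz,
 * mii_speed = DIV_ROUND_UP(66000000, 5000000) = 14, minus one on
 * ENET-MAC parts gives 13, so MDC = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz,
 * just under the 2.5 MHz target.  Likewise
 * holdtime = DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e. one clock
 * of hold (~15 ns), which satisfies the 10 ns minimum from IEEE 802.3
 * clause 22.
 */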
static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (--mii_cnt == 0) {
		mdiobus_unregister(fep->mii_bus);
		mdiobus_free(fep->mii_bus);
	}
}

static void fec_enet_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	strlcpy(info->driver, fep->pdev->dev.driver->name,
		sizeof(info->driver));
	strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
static int fec_enet_get_regs_len(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct resource *r;
	int s = 0;

	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
	if (r)
		s = resource_size(r);

	return s;
}
/* List of registers that can safely be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
static u32 fec_enet_register_offset[] = {
	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
	RMON_T_P_GTE2048, RMON_T_OCTETS,
	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
	RMON_R_P_GTE2048, RMON_R_OCTETS,
	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
#else
static u32 fec_enet_register_offset[] = {
	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
};
#endif
static void fec_enet_get_regs(struct net_device *ndev,
			      struct ethtool_regs *regs, void *regbuf)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
	u32 *buf = (u32 *)regbuf;
	u32 i, off;

	memset(buf, 0, regs->len);

	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
		off = fec_enet_register_offset[i] / 4;
		buf[off] = readl(&theregs[off]);
	}
}
static int fec_enet_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->bufdesc_ex) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
		if (fep->ptp_clock)
			info->phc_index = ptp_clock_index(fep->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
				 (1 << HWTSTAMP_TX_ON);

		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
				   (1 << HWTSTAMP_FILTER_ALL);
		return 0;
	} else {
		return ethtool_op_get_ts_info(ndev, info);
	}
}
#if !defined(CONFIG_M5272)

static void fec_enet_get_pauseparam(struct net_device *ndev,
				    struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
	pause->rx_pause = pause->tx_pause;
}

static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!ndev->phydev)
		return -ENODEV;

	if (pause->tx_pause != pause->rx_pause) {
		netdev_info(ndev,
			"hardware only supports enabling/disabling both tx and rx");
		return -EINVAL;
	}

	fep->pause_flag = 0;

	/* tx pause must be the same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	if (pause->rx_pause || pause->autoneg) {
		ndev->phydev->supported |= ADVERTISED_Pause;
		ndev->phydev->advertising |= ADVERTISED_Pause;
	} else {
		ndev->phydev->supported &= ~ADVERTISED_Pause;
		ndev->phydev->advertising &= ~ADVERTISED_Pause;
	}

	if (pause->autoneg) {
		if (netif_running(ndev))
			fec_stop(ndev);
		phy_start_aneg(ndev->phydev);
	}
	if (netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}

	return 0;
}

static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped", RMON_T_DROP },
	{ "tx_packets", RMON_T_PACKETS },
	{ "tx_broadcast", RMON_T_BC_PKT },
	{ "tx_multicast", RMON_T_MC_PKT },
	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
	{ "tx_undersize", RMON_T_UNDERSIZE },
	{ "tx_oversize", RMON_T_OVERSIZE },
	{ "tx_fragment", RMON_T_FRAG },
	{ "tx_jabber", RMON_T_JAB },
	{ "tx_collision", RMON_T_COL },
	{ "tx_64byte", RMON_T_P64 },
	{ "tx_65to127byte", RMON_T_P65TO127 },
	{ "tx_128to255byte", RMON_T_P128TO255 },
	{ "tx_256to511byte", RMON_T_P256TO511 },
	{ "tx_512to1023byte", RMON_T_P512TO1023 },
	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
	{ "tx_octets", RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop", IEEE_T_DROP },
	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col", IEEE_T_1COL },
	{ "IEEE_tx_mcol", IEEE_T_MCOL },
	{ "IEEE_tx_def", IEEE_T_DEF },
	{ "IEEE_tx_lcol", IEEE_T_LCOL },
	{ "IEEE_tx_excol", IEEE_T_EXCOL },
	{ "IEEE_tx_macerr", IEEE_T_MACERR },
	{ "IEEE_tx_cserr", IEEE_T_CSERR },
	{ "IEEE_tx_sqe", IEEE_T_SQE },
	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets", RMON_R_PACKETS },
	{ "rx_broadcast", RMON_R_BC_PKT },
	{ "rx_multicast", RMON_R_MC_PKT },
	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
	{ "rx_undersize", RMON_R_UNDERSIZE },
	{ "rx_oversize", RMON_R_OVERSIZE },
	{ "rx_fragment", RMON_R_FRAG },
	{ "rx_jabber", RMON_R_JAB },
	{ "rx_64byte", RMON_R_P64 },
	{ "rx_65to127byte", RMON_R_P65TO127 },
	{ "rx_128to255byte", RMON_R_P128TO255 },
	{ "rx_256to511byte", RMON_R_P256TO511 },
	{ "rx_512to1023byte", RMON_R_P512TO1023 },
	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
	{ "rx_octets", RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop", IEEE_R_DROP },
	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc", IEEE_R_CRC },
	{ "IEEE_rx_align", IEEE_R_ALIGN },
	{ "IEEE_rx_macerr", IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};

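/* Every entry above surfaces verbatim in `ethtool -S <iface>`: the name
 * becomes the string label and the offset names the MIB counter register
 * read by fec_enet_update_ethtool_stats() below.
 */
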
#define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))

static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
}

static void fec_enet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (netif_running(dev))
		fec_enet_update_ethtool_stats(dev);

	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
}

static void fec_enet_get_strings(struct net_device *netdev,
	u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       fec_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(fec_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Disable MIB statistics counters */
	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		writel(0, fep->hwp + fec_stats[i].offset);

	/* Re-enable MIB statistics counters (clear the disable bit) */
	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
}

#else	/* !defined(CONFIG_M5272) */
#define FEC_STATS_SIZE	0
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}

static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
}
#endif /* !defined(CONFIG_M5272) */

/* ITR clock source is enet system clock (clk_ahb).
 * TCTT unit is cycle_ns * 64 cycle
 * So, the ICTT value = X us / (cycle_ns * 64)
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	return us * (fep->itr_clk_rate / 64000) / 1000;
}

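/* Worked example of the conversion above, assuming (for illustration
 * only) a 66 MHz AHB clock, i.e. itr_clk_rate = 66000000:
 *   us = 1000  ->  1000 * (66000000 / 64000) / 1000 = 1031
 * which fits the 16-bit ICTT field that fec_enet_set_coalesce()
 * later checks against 0xFFFF.
 */
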
/* Set threshold for interrupt coalescing */
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int rx_itr, tx_itr;

	/* Must be greater than zero to avoid unpredictable behavior */
	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
	    !fep->tx_time_itr || !fep->tx_pkts_itr)
		return;

	/* Select enet system clock as Interrupt Coalescing
	 * timer Clock Source
	 */
	rx_itr = FEC_ITR_CLK_SEL;
	tx_itr = FEC_ITR_CLK_SEL;

	/* set ICFT and ICTT */
	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));

	rx_itr |= FEC_ITR_EN;
	tx_itr |= FEC_ITR_EN;

	writel(tx_itr, fep->hwp + FEC_TXIC0);
	writel(rx_itr, fep->hwp + FEC_RXIC0);
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(tx_itr, fep->hwp + FEC_TXIC1);
		writel(rx_itr, fep->hwp + FEC_RXIC1);
		writel(tx_itr, fep->hwp + FEC_TXIC2);
		writel(rx_itr, fep->hwp + FEC_RXIC2);
	}
}

static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

	ec->rx_coalesce_usecs = fep->rx_time_itr;
	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;

	ec->tx_coalesce_usecs = fep->tx_time_itr;
	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;

	return 0;
}

static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int cycle;

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (ec->rx_max_coalesced_frames > 255) {
		pr_err("Rx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
		pr_err("Tx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	/* Validate the requested (not the stored) values so bad input is
	 * rejected before it is committed below.
	 */
	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		pr_err("Rx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		pr_err("Tx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}

static void fec_enet_itr_coal_init(struct net_device *ndev)
{
	struct ethtool_coalesce ec;

	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	fec_enet_set_coalesce(ndev, &ec);
}

static int fec_enet_get_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = fep->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fec_enet_set_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				const void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		fep->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}

static int
fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
	if (device_may_wakeup(&ndev->dev)) {
		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
		if (fep->irq[0] > 0)
			enable_irq_wake(fep->irq[0]);
	} else {
		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
		if (fep->irq[0] > 0)
			disable_irq_wake(fep->irq[0]);
	}

	return 0;
}

static const struct ethtool_ops fec_enet_ethtool_ops = {
	.get_drvinfo		= fec_enet_get_drvinfo,
	.get_regs_len		= fec_enet_get_regs_len,
	.get_regs		= fec_enet_get_regs,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_tunable		= fec_enet_get_tunable,
	.set_tunable		= fec_enet_set_tunable,
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (fep->bufdesc_ex) {
		if (cmd == SIOCSHWTSTAMP)
			return fec_ptp_set(ndev, rq);
		if (cmd == SIOCGHWTSTAMP)
			return fec_ptp_get(ndev, rq);
	}

	return phy_mii_ioctl(phydev, rq, cmd);
}

static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			skb = rxq->rx_skbuff[i];
			rxq->rx_skbuff[i] = NULL;
			if (skb) {
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 FEC_ENET_RX_FRSIZE - fep->rx_align,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		for (i = 0; i < txq->bd.ring_size; i++) {
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;
			dev_kfree_skb(skb);
		}
	}
}

static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
			dma_free_coherent(&fep->pdev->dev,
					  txq->bd.ring_size * TSO_HEADER_SIZE,
					  txq->tso_hdrs,
					  txq->tso_hdrs_dma);
		}

	for (i = 0; i < fep->num_rx_queues; i++)
		kfree(fep->rx_queue[i]);
	for (i = 0; i < fep->num_tx_queues; i++)
		kfree(fep->tx_queue[i]);
}

static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
		txq->bd.ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;

		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold =
			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
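
		/* Illustration only (the exact numbers depend on how
		 * TX_RING_SIZE and FEC_MAX_SKB_DESCS are defined): with a
		 * 512-entry ring and a stop threshold of 37, the queue is
		 * only woken once (512 - 37) / 2 = 237 descriptors are
		 * free again, keeping stop/wake transitions well apart.
		 */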

		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
					txq->bd.ring_size * TSO_HEADER_SIZE,
					&txq->tso_hdrs_dma,
					GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
	}

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
}

static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_rx_q *rxq;

	rxq = fep->rx_queue[queue];
	bdp = rxq->bd.base;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
		if (!skb)
			goto err_alloc;

		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
			dev_kfree_skb(skb);
			goto err_alloc;
		}

		rxq->rx_skbuff[i] = skb;
		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}

static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct bufdesc	*bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		bdp->cbd_sc = cpu_to_fec16(0);
		bdp->cbd_bufaddr = cpu_to_fec32(0);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}

static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		if (fec_enet_alloc_rxq_buffers(ndev, i))
			return -ENOMEM;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fec_enet_alloc_txq_buffers(ndev, i))
			return -ENOMEM;
	return 0;
}

static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(&fep->pdev->dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(&fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto clk_enable;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		goto err_enet_alloc;

	/* Init MAC prior to mii bus probe */
	fec_restart(ndev);

	/* Probe and connect to the PHY when opening the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret)
		goto err_enet_mii_probe;

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_used();

	napi_enable(&fep->napi);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(ndev);

	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
clk_enable:
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
}

static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(ndev->phydev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(ndev->phydev);

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_unused();

	fec_enet_update_ethtool_stats(ndev);

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);

	fec_enet_free_buffers(ndev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering. Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not. I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define FEC_HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;
	unsigned int hash_high = 0, hash_low = 0;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Add the addresses in hash register */
	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < ndev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only the upper 6 bits (FEC_HASH_BITS) are used,
		 * selecting one specific bit in the hash registers
		 */
		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
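
		/* Worked example (illustrative): crc = 0xB6000000 has
		 * 0b101101 = 45 as its top six bits, so hash = 45 and
		 * bit (45 - 32) = 13 of GRP_HASH_TABLE_HIGH is set below;
		 * a hash of 0..31 would set the matching bit of
		 * GRP_HASH_TABLE_LOW instead.
		 */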
		if (hash > 31)
			hash_high |= 1 << (hash - 32);
		else
			hash_low |= 1 << hash;
	}

	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}

/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (p) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}

	/* Add a netif status check here to avoid a system hang in the
	 * following case:
	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
	 * Once ethx is down, all FEC clocks are gated off, and any
	 * subsequent register access would hang the system.
	 */
	if (!netif_running(ndev))
		return 0;
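
	/* Byte-to-register layout, shown on the made-up address
	 * 12:34:56:78:9a:bc: the writes below place 0x12345678 (bytes
	 * 0-3, most significant byte first) in FEC_ADDR_LOW and
	 * 0x9abc0000 (bytes 4-5) in the upper half of FEC_ADDR_HIGH.
	 */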
	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
	       (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
	       fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
	       fep->hwp + FEC_ADDR_HIGH);
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non-interrupt mode
 *
 */
static void fec_poll_controller(struct net_device *dev)
{
	int i;
	struct fec_enet_private *fep = netdev_priv(dev);

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		if (fep->irq[i] > 0) {
			disable_irq(fep->irq[i]);
			fec_enet_interrupt(fep->irq[i], dev);
			enable_irq(fep->irq[i]);
		}
	}
}
#endif

static inline void fec_enet_set_netdev_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
	}
}

static int fec_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
		fec_enet_set_netdev_features(netdev, features);
		fec_restart(netdev);
		netif_tx_wake_all_queues(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	} else {
		fec_enet_set_netdev_features(netdev, features);
	}

	return 0;
}

static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};

static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};

 /*
  * XXX:  We need to clean up on failure exits here.
  *
  */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(dsize);

	WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif

	fec_enet_alloc_queue(ndev);

	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
				       GFP_KERNEL);
	if (!cbd_base)
		return -ENOMEM;

	memset(cbd_base, 0, bd_size);

	/* Get the Ethernet address */
	fec_get_mac(ndev);
	/* make sure MAC we just acquired is programmed into the hw */
	fec_set_mac_address(ndev, NULL);

	/* Set receive and transmit descriptor base. */
	for (i = 0; i < fep->num_rx_queues; i++) {
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.qid = i;
		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

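	/* The tx rings below are carved out of the same coherent
	 * allocation, directly after the rx rings: cbd_base and bd_dma
	 * were advanced past each rx ring above, and bd.last always ends
	 * up pointing at the final descriptor of the ring just laid out.
	 */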
	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.qid = i;
		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

	fec_restart(ndev);

	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
		fec_enet_clear_ethtool_stats(ndev);
	else
		fec_enet_update_ethtool_stats(ndev);

	return 0;
}

#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	bool active_high = false;
	int msec = 1, phy_post_delay = 0;
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return 0;

	err = of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (!err && msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (phy_reset == -EPROBE_DEFER)
		return phy_reset;
	else if (!gpio_is_valid(phy_reset))
		return 0;

	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
	/* a valid post-reset delay should be less than 1s */
	if (!err && phy_post_delay > 1000)
		return -EINVAL;

	active_high = of_property_read_bool(np, "phy-reset-active-high");

	err = devm_gpio_request_one(&pdev->dev, phy_reset,
			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
			"phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return err;
	}

	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);

	gpio_set_value_cansleep(phy_reset, !active_high);

	if (!phy_post_delay)
		return 0;

	if (phy_post_delay > 20)
		msleep(phy_post_delay);
	else
		usleep_range(phy_post_delay * 1000,
			     phy_post_delay * 1000 + 1000);

	return 0;
}
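
/* An illustrative device-tree fragment exercising the properties parsed
 * above (the node and GPIO names are made up; only the property names
 * are meaningful):
 *
 *	&fec {
 *		phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 *		phy-reset-duration = <10>;	// ms; values > 1000 fall back to 1
 *		phy-reset-post-delay = <25>;	// ms; > 1000 is rejected, > 20 uses msleep()
 *	};
 */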
#else /* CONFIG_OF */
static int fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
	return 0;
}
#endif /* CONFIG_OF */

static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);

	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
		*num_rx = 1;
		return;
	}
}

static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;
	const struct of_device_id *of_id;
	static int dev_id;
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;
	fep->quirks = pdev->id_entry->driver_data;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	if ((of_machine_is_compatible("fsl,imx6q") ||
	     of_machine_is_compatible("fsl,imx6dl")) &&
	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
		fep->quirks |= FEC_QUIRK_ERR006687;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref))
		fep->clk_ref = NULL;

	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			clk_disable_unprepare(fep->clk_ipg);
			goto failed_regulator;
		}
	} else {
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;

	if (fep->bufdesc_ex)
		fec_ptp_init(pdev);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			if (i)
				break;
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	init_completion(&fep->mdio_done);
	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	fep->rx_copybreak = COPYBREAK_DEFAULT;
	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_reset:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
failed_regulator:
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
failed_phy:
	of_node_put(phy_node);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);
	free_netdev(ndev);

	return 0;
}

static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		fec_enet_clk_enable(ndev, false);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* The SoC supplies the PHY clock: if that clock is disabled, the
	 * PHY link goes down. Likewise, the SoC may control the PHY
	 * regulator: disabling it also takes the link down.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}

static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			if (pdata && pdata->sleep_mode_enable)
				pdata->sleep_mode_enable(false);
			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}

static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}

static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	return clk_prepare_enable(fep->clk_ipg);
}

static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");