/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>

#include <asm/cacheflush.h>

#include "fec.h"
static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);
#define DRIVER_NAME	"fec"
#define FEC_ENET_GET_QUEUE(_x)	((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
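/* Maps a work-bit index back to its hardware queue: bit 0 -> queue 1
 * (AVB Class A), bit 1 -> queue 2 (Class B), bit 2 -> queue 0 (best
 * effort). This mirrors the bit layout chosen in
 * fec_enet_collect_events() so that for_each_set_bit() walks the
 * queues in priority order.
 */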
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx27-fec",
		.driver_data = FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
				FEC_QUIRK_HAS_RACC,
	}, {
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
				FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
				FEC_QUIRK_HAS_RACC,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);
enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
	IMX6SX_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1522
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1536
/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
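/* For example, a clause-22 read of register 2 on the PHY at address 1
 * starts by writing (FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(1) |
 * FEC_MMFR_RA(2) | FEC_MMFR_TA) to the FEC_MII_DATA register; once the
 * MII event completes, the 16 result bits are extracted from the same
 * register with FEC_MMFR_DATA() (see fec_enet_mdio_read() below).
 */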
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN		(1 << 2)
#define FEC_ECR_SLEEP		(1 << 3)

#define FEC_MII_TIMEOUT		30000 /* us */
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

#define COPYBREAK_DEFAULT	256

#define TSO_HEADER_SIZE		128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
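/* IS_TSO_HEADER() tells the tx completion path whether a descriptor's
 * buffer lies inside the driver-owned tso_hdrs DMA region (one
 * TSO_HEADER_SIZE slot per ring entry). Such buffers are mapped once
 * for the lifetime of the ring and must not be dma_unmap_single()'d
 * per packet (see fec_enet_tx_queue()).
 */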
static int mii_cnt;

static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((unsigned)bdp) + bd->dsize);
}
static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((unsigned)bdp) - bd->dsize);
}
static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}
static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}
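/* Worked example (values assumed): with ring_size = 512 and
 * dsize_log2 = 3 (8-byte legacy descriptors), dirty_tx at index 10 and
 * bd.cur at index 500 give entries = ((10 - 500) * 8 >> 3) - 1 = -491,
 * which wraps to -491 + 512 = 21 free descriptors. The "- 1" keeps one
 * descriptor in reserve so a completely full ring is never mistaken
 * for an empty one.
 */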
static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}
static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	int i;
	unsigned int *src = src_buf;
	unsigned int *dst = dst_buf;

	for (i = 0; i < len; i += 4, src++, dst++)
		*dst = swab32p(src);
}
static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->bd.base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
			index,
			bdp == txq->bd.cur ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_skbuff[index]);
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
}
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		ebdp = (struct bufdesc_ex *)bdp;

		status = fec16_to_cpu(bdp->cbd_sc);
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = cpu_to_fec32(estatus);
		}

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
	}

	return bdp;
dma_mapping_error:
	bdp = txq->bd.cur;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
	}
	return ERR_PTR(-ENOMEM);
}
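/* A note on the bounce buffers used above: a fragment that violates the
 * controller's alignment requirement (fep->tx_align), or any frame on a
 * SoC with the byte-swapping quirk, is first copied into
 * txq->tx_bounce[index] so the descriptor always points at memory the
 * DMA engine can consume directly; the memcpy() is the price paid for
 * misaligned skbs.
 */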
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	last_bdp = bdp;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(bdp, &txq->bd);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp and tx_skbuff are performed before
	 * txq->bd.cur.
	 */
	wmb();
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}
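/* The wmb() before setting BD_ENET_TX_READY above is the ownership
 * hand-off: every other descriptor field must be visible to the DMA
 * engine before the READY bit is, otherwise the controller could
 * transmit from a half-written descriptor.
 */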
static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_BUSY;
	}

	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}
static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
	struct bufdesc *bdp = txq->bd.cur;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}
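/* The four back-to-back reads of reg_desc_active before the final
 * trigger are the ERR007885 workaround: on affected SoCs a write to the
 * transmit descriptor active register can be ignored while the uDMA is
 * busy, so the register is only re-armed when the quirk is absent or
 * one of the reads observes it as zero (transmitter idle).
 */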
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}
/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}
static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}
static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}
static void fec_enet_reset_skb(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	int i, j;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

		for (j = 0; j < txq->bd.ring_size; j++) {
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
			}
		}
	}
}
/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset. We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
		writel((__force u32)cpu_to_be32(temp_mac[0]),
		       fep->hwp + FEC_ADDR_LOW);
		writel((__force u32)cpu_to_be32(temp_mac[1]),
		       fep->hwp + FEC_ADDR_HIGH);
	}

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		/* set RX checksum */
		val = readl(fep->hwp + FEC_RACC);
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
	}
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (fep->phy_dev) {
			if (fep->phy_dev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (fep->phy_dev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame*/
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     fep->phy_dev && fep->phy_dev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);
}
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset. We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(1, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	} else {
		writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);

		if (pdata && pdata->sleep_mode_enable)
			pdata->sleep_mode_enable(true);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}
static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}
static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}
static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
		  struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}
static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;

	fep = netdev_priv(ndev);

	queue_id = FEC_ENET_GET_QUEUE(queue_id);

	txq = fep->tx_queue[queue_id];
	/* get next bdp of dirty_tx */
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		if (status & BD_ENET_TX_READY)
			break;

		index = fec_enet_get_bd_index(bdp, &txq->bd);

		skb = txq->tx_skbuff[index];
		txq->tx_skbuff[index] = NULL;
		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 fec16_to_cpu(bdp->cbd_datlen),
					 DMA_TO_DEVICE);
		bdp->cbd_bufaddr = cpu_to_fec32(0);
		if (!skb) {
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			continue;
		}

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);

		/* Make sure the update to bdp and tx_skbuff are performed
		 * before dirty_tx
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_queue_stopped(ndev)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006538: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
}
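/* ERR006538 note: on affected SoCs the transmitter can stall with
 * descriptors still queued while the descriptor-active register reads
 * back zero, so the completion path above re-arms it whenever clean-up
 * left unprocessed descriptors behind (bdp != txq->bd.cur).
 */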
static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u16 queue_id;

	/* First process class A queue, then Class B and Best Effort queue */
	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
		clear_bit(queue_id, &fep->work_tx);
		fec_enet_tx_queue(ndev, queue_id);
	}
}
static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int off;

	off = ((unsigned long)skb->data) & fep->rx_align;
	if (off)
		skb_reserve(skb, fep->rx_align + 1 - off);

	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");
		return -ENOMEM;
	}

	return 0;
}
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *new_skb;

	if (length > fep->rx_copybreak)
		return false;

	new_skb = netdev_alloc_skb(ndev, length);
	if (!new_skb)
		return false;

	dma_sync_single_for_cpu(&fep->pdev->dev,
				fec32_to_cpu(bdp->cbd_bufaddr),
				FEC_ENET_RX_FRSIZE - fep->rx_align,
				DMA_FROM_DEVICE);
	if (!swap)
		memcpy(new_skb->data, (*skb)->data, length);
	else
		swap_buffer2(new_skb->data, (*skb)->data, length);
	*skb = new_skb;

	return true;
}
/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb_new = NULL;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	u16 vlan_tag;
	int index = 0;
	bool is_copybreak;
	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	queue_id = FEC_ENET_GET_QUEUE(queue_id);
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->bd.cur;

	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

		/* Check for errors. */
		status ^= BD_ENET_RX_LAST;
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			   BD_ENET_RX_CL)) {
			ndev->stats.rx_errors++;
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
						| BD_ENET_RX_LAST)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(bdp, &rxq->bd);
		skb = rxq->rx_skbuff[index];

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
						  need_swap);
		if (!is_copybreak) {
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			}
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 FEC_ENET_RX_FRSIZE - fep->rx_align,
					 DMA_FROM_DEVICE);
		}

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);
		data = skb->data;
		if (!is_copybreak && need_swap)
			swap_buffer(data, pkt_len);

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		napi_gro_receive(&fep->napi, skb);

		if (is_copybreak) {
			dma_sync_single_for_device(&fep->pdev->dev,
						   fec32_to_cpu(bdp->cbd_bufaddr),
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
						   DMA_FROM_DEVICE);
		} else {
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);
		}

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, rxq->bd.reg_desc_active);
	}
	rxq->bd.cur = bdp;
	return pkt_received;
}
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	int pkt_received = 0;
	u16 queue_id;
	struct fec_enet_private *fep = netdev_priv(ndev);

	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		clear_bit(queue_id, &fep->work_rx);
		pkt_received += fec_enet_rx_queue(ndev,
					budget - pkt_received, queue_id);
	}
	return pkt_received;
}
static bool
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
	if (int_events == 0)
		return false;

	if (int_events & FEC_ENET_RXF)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
	if (int_events & FEC_ENET_TXF_2)
		fep->work_tx |= (1 << 1);

	return true;
}
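/* The work bits deliberately invert the hardware numbering: queue 1
 * (Class A) lands in bit 0 and queue 2 (Class B) in bit 1, so that
 * for_each_set_bit() in fec_enet_rx() and fec_enet_tx() services the
 * queues in AVB priority order; FEC_ENET_GET_QUEUE() undoes the
 * mapping.
 */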
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	int_events = readl(fep->hwp + FEC_IEVENT);
	writel(int_events, fep->hwp + FEC_IEVENT);
	fec_enet_collect_events(fep, int_events);

	if ((fep->work_tx || fep->work_rx) && fep->link) {
		ret = IRQ_HANDLED;

		if (napi_schedule_prep(&fep->napi)) {
			/* Disable the NAPI interrupts */
			writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);
		}
	}

	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}

	if (fep->ptp_clock)
		fec_ptp_check_pps_event(fep);

	return ret;
}
static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int pkts;

	pkts = fec_enet_rx(ndev, budget);

	fec_enet_tx(ndev);

	if (pkts < budget) {
		napi_complete(napi);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}
/* ------------------------------------------------------------------------- */
static void fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		netdev_info(ndev, "Using random MAC address: %pM\n",
			    ndev->dev_addr);
		return;
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
/* ------------------------------------------------------------------------- */

static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = fep->phy_dev;
	int status_change = 0;

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		return;
	}

	/*
	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
		fep->link = 0;
	} else if (phy_dev->link) {
		if (!fep->link) {
			fep->link = phy_dev->link;
			status_change = 1;
		}

		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;
			status_change = 1;
		}

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
		if (status_change) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_restart(ndev);
			netif_wake_queue(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
		}
	} else {
		if (fep->link) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_stop(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	if (status_change)
		phy_print_status(phy_dev);
}
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret = 0;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	fep->mii_timeout = 0;
	reinit_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO read timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;
	else
		ret = 0;

	fep->mii_timeout = 0;
	reinit_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO write timeout\n");
		ret = -ETIMEDOUT;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_ahb);
		if (ret)
			return ret;
		if (fep->clk_enet_out) {
			ret = clk_prepare_enable(fep->clk_enet_out);
			if (ret)
				goto failed_clk_enet_out;
		}
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret) {
				mutex_unlock(&fep->ptp_clk_mutex);
				goto failed_clk_ptp;
			} else {
				fep->ptp_clk_on = true;
			}
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		if (fep->clk_ref) {
			ret = clk_prepare_enable(fep->clk_ref);
			if (ret)
				goto failed_clk_ref;
		}
	} else {
		clk_disable_unprepare(fep->clk_ahb);
		if (fep->clk_enet_out)
			clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			clk_disable_unprepare(fep->clk_ptp);
			fep->ptp_clk_on = false;
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		if (fep->clk_ref)
			clk_disable_unprepare(fep->clk_ref);
	}

	return 0;

failed_clk_ref:
	if (fep->clk_ref)
		clk_disable_unprepare(fep->clk_ref);
failed_clk_ptp:
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
	clk_disable_unprepare(fep->clk_ahb);

	return ret;
}
static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	fep->phy_dev = NULL;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(ndev, fep->phy_node,
					 &fec_enet_adjust_link, 0,
					 fep->phy_interface);
		if (!phy_dev)
			return -ENODEV;
	} else {
		/* check for attached phy */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
				continue;
			if (dev_id--)
				continue;
			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
			break;
		}

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
			phy_id = 0;
		}

		snprintf(phy_name, sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);
	}

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	}
	else
		phy_dev->supported &= PHY_BASIC_FEATURES;

	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->link = 0;
	fep->full_duplex = 0;

	phy_attached_info(phy_dev);

	return 0;
}
static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *node;
	int err = -ENXIO;
	u32 mii_speed, holdtime;

	/*
	 * The i.MX28 dual fec interfaces are not equal.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document.
	 */
	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
		mii_speed--;
	if (mii_speed > 63) {
		dev_err(&pdev->dev,
			"fec clock (%lu) too fast to get right mii speed\n",
			clk_get_rate(fep->clk_ipg));
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register always.
	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
	 */
	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;

	fep->phy_speed = mii_speed << 1 | holdtime << 8;

	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
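	/* Worked example (assuming a 66 MHz ipg clock on an ENET-MAC part):
	 * mii_speed = DIV_ROUND_UP(66000000, 5000000) - 1 = 13, giving
	 * MDC = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz, just under the 2.5 MHz
	 * limit; holdtime = DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e.
	 * one clock cycle (~15 ns) of hold, and MSCR is written with
	 * 13 << 1 | 0 << 8 = 0x1a.
	 */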
	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		pdev->name, fep->dev_id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
	if (node) {
		err = of_mdiobus_register(fep->mii_bus, node);
		of_node_put(node);
	} else {
		err = mdiobus_register(fep->mii_bus);
	}
	if (err)
		goto err_out_free_mdiobus;

	mii_cnt++;

	/* save fec0 mii_bus */
	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
		fec0_mii_bus = fep->mii_bus;

	return 0;

err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}
static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (--mii_cnt == 0) {
		mdiobus_unregister(fep->mii_bus);
		mdiobus_free(fep->mii_bus);
	}
}
static int fec_enet_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int fec_enet_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}
static void fec_enet_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	strlcpy(info->driver, fep->pdev->dev.driver->name,
		sizeof(info->driver));
	strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
static int fec_enet_get_regs_len(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct resource *r;
	int s = 0;

	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
	if (r)
		s = resource_size(r);

	return s;
}
/* List of registers that can safely be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
static u32 fec_enet_register_offset[] = {
	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
	RMON_T_P_GTE2048, RMON_T_OCTETS,
	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
	RMON_R_P_GTE2048, RMON_R_OCTETS,
	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
#else
static u32 fec_enet_register_offset[] = {
	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
};
#endif
static void fec_enet_get_regs(struct net_device *ndev,
			      struct ethtool_regs *regs, void *regbuf)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
	u32 *buf = (u32 *)regbuf;
	u32 i, off;

	memset(buf, 0, regs->len);

	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
		off = fec_enet_register_offset[i] / 4;
		buf[off] = readl(&theregs[off]);
	}
}
static int fec_enet_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->bufdesc_ex) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
		if (fep->ptp_clock)
			info->phc_index = ptp_clock_index(fep->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
				 (1 << HWTSTAMP_TX_ON);

		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
				   (1 << HWTSTAMP_FILTER_ALL);
		return 0;
	} else {
		return ethtool_op_get_ts_info(ndev, info);
	}
}
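/* A quick way to see which of the capabilities above were reported is
 * "ethtool -T eth0" (interface name is a placeholder); on bufdesc_ex
 * hardware it lists the hardware timestamping modes together with the
 * PHC index obtained from ptp_clock_index() above.
 */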
#if !defined(CONFIG_M5272)

static void fec_enet_get_pauseparam(struct net_device *ndev,
				    struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
	pause->rx_pause = pause->tx_pause;
}
static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!fep->phy_dev)
		return -ENODEV;

	if (pause->tx_pause != pause->rx_pause) {
		netdev_info(ndev,
			"hardware only supports enabling/disabling tx and rx together\n");
		return -EINVAL;
	}

	fep->pause_flag = 0;

	/* tx pause must be the same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	if (pause->rx_pause || pause->autoneg) {
		fep->phy_dev->supported |= ADVERTISED_Pause;
		fep->phy_dev->advertising |= ADVERTISED_Pause;
	} else {
		fep->phy_dev->supported &= ~ADVERTISED_Pause;
		fep->phy_dev->advertising &= ~ADVERTISED_Pause;
	}

	if (pause->autoneg) {
		if (netif_running(ndev))
			fec_stop(ndev);
		phy_start_aneg(fep->phy_dev);
	}
	if (netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}

	return 0;
}
static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped", RMON_T_DROP },
	{ "tx_packets", RMON_T_PACKETS },
	{ "tx_broadcast", RMON_T_BC_PKT },
	{ "tx_multicast", RMON_T_MC_PKT },
	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
	{ "tx_undersize", RMON_T_UNDERSIZE },
	{ "tx_oversize", RMON_T_OVERSIZE },
	{ "tx_fragment", RMON_T_FRAG },
	{ "tx_jabber", RMON_T_JAB },
	{ "tx_collision", RMON_T_COL },
	{ "tx_64byte", RMON_T_P64 },
	{ "tx_65to127byte", RMON_T_P65TO127 },
	{ "tx_128to255byte", RMON_T_P128TO255 },
	{ "tx_256to511byte", RMON_T_P256TO511 },
	{ "tx_512to1023byte", RMON_T_P512TO1023 },
	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
	{ "tx_octets", RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop", IEEE_T_DROP },
	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col", IEEE_T_1COL },
	{ "IEEE_tx_mcol", IEEE_T_MCOL },
	{ "IEEE_tx_def", IEEE_T_DEF },
	{ "IEEE_tx_lcol", IEEE_T_LCOL },
	{ "IEEE_tx_excol", IEEE_T_EXCOL },
	{ "IEEE_tx_macerr", IEEE_T_MACERR },
	{ "IEEE_tx_cserr", IEEE_T_CSERR },
	{ "IEEE_tx_sqe", IEEE_T_SQE },
	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets", RMON_R_PACKETS },
	{ "rx_broadcast", RMON_R_BC_PKT },
	{ "rx_multicast", RMON_R_MC_PKT },
	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
	{ "rx_undersize", RMON_R_UNDERSIZE },
	{ "rx_oversize", RMON_R_OVERSIZE },
	{ "rx_fragment", RMON_R_FRAG },
	{ "rx_jabber", RMON_R_JAB },
	{ "rx_64byte", RMON_R_P64 },
	{ "rx_65to127byte", RMON_R_P65TO127 },
	{ "rx_128to255byte", RMON_R_P128TO255 },
	{ "rx_256to511byte", RMON_R_P256TO511 },
	{ "rx_512to1023byte", RMON_R_P512TO1023 },
	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
	{ "rx_octets", RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop", IEEE_R_DROP },
	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc", IEEE_R_CRC },
	{ "IEEE_rx_align", IEEE_R_ALIGN },
	{ "IEEE_rx_macerr", IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};
static void fec_enet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		data[i] = readl(fep->hwp + fec_stats[i].offset);
}
static void fec_enet_get_strings(struct net_device *netdev,
	u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       fec_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(fec_stats);
	default:
		return -EOPNOTSUPP;
	}
}
#endif /* !defined(CONFIG_M5272) */
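/* The fec_stats[] names above are exactly what "ethtool -S eth0" prints
 * (eth0 being a placeholder name); each value is a direct readl() of
 * the corresponding MIB counter register in fec_enet_get_ethtool_stats().
 */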
static int fec_enet_nway_reset(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return genphy_restart_aneg(phydev);
}
/* ITR clock source is the enet system clock (clk_ahb).
 * The TCTT field counts in units of 64 clock cycles,
 * so the ICTT value = X us / (cycle_ns * 64).
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	return us * (fep->itr_clk_rate / 64000) / 1000;
}
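/* Worked example, assuming a 66 MHz AHB clock (the rate is board
 * specific): one ITR tick is 64 cycles = 64 / 66e6 s ~= 0.97 us, and
 * fep->itr_clk_rate / 64000 = 66000000 / 64000 = 1031 ticks per ms,
 * so us = 1000 yields 1000 * 1031 / 1000 = 1031 for the ICTT field.
 */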
/* Set threshold for interrupt coalescing */
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int rx_itr, tx_itr;

	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
		return;

	/* Must be greater than zero to avoid unpredictable behavior */
	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
	    !fep->tx_time_itr || !fep->tx_pkts_itr)
		return;

	/* Select enet system clock as Interrupt Coalescing
	 * timer Clock Source
	 */
	rx_itr = FEC_ITR_CLK_SEL;
	tx_itr = FEC_ITR_CLK_SEL;

	/* set ICFT and ICTT */
	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));

	rx_itr |= FEC_ITR_EN;
	tx_itr |= FEC_ITR_EN;

	writel(tx_itr, fep->hwp + FEC_TXIC0);
	writel(rx_itr, fep->hwp + FEC_RXIC0);
	writel(tx_itr, fep->hwp + FEC_TXIC1);
	writel(rx_itr, fep->hwp + FEC_RXIC1);
	writel(tx_itr, fep->hwp + FEC_TXIC2);
	writel(rx_itr, fep->hwp + FEC_RXIC2);
}
static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
		return -EOPNOTSUPP;

	ec->rx_coalesce_usecs = fep->rx_time_itr;
	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;

	ec->tx_coalesce_usecs = fep->tx_time_itr;
	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;

	return 0;
}
static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int cycle;

	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
		return -EOPNOTSUPP;

	if (ec->rx_max_coalesced_frames > 255) {
		pr_err("Rx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
		pr_err("Tx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	/* Validate the requested values, not the currently programmed ones */
	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		pr_err("Rx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		pr_err("Tx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}
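/* These limits map directly onto the standard ethtool coalescing
 * interface, e.g. (eth0 being a placeholder interface name):
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-usecs 200 tx-frames 64
 *
 * Frame counts above 255 or usec values that overflow the 16-bit ICTT
 * field are rejected with -EINVAL, as coded above.
 */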
static void fec_enet_itr_coal_init(struct net_device *ndev)
{
	struct ethtool_coalesce ec;

	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	fec_enet_set_coalesce(ndev, &ec);
}
static int fec_enet_get_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = fep->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fec_enet_set_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				const void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		fep->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
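/* rx_copybreak decides below which frame size a received frame is
 * copied into a fresh skb instead of handing over the DMA buffer.  It
 * is exposed through the ethtool tunable interface; with a reasonably
 * recent ethtool something like the following is expected to work
 * (eth0 is a placeholder):
 *
 *	ethtool --set-tunable eth0 rx-copybreak 256
 *	ethtool --get-tunable eth0 rx-copybreak
 */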
static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}
static int
fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
	if (device_may_wakeup(&ndev->dev)) {
		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
		if (fep->irq[0] > 0)
			enable_irq_wake(fep->irq[0]);
	} else {
		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
		if (fep->irq[0] > 0)
			disable_irq_wake(fep->irq[0]);
	}

	return 0;
}
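/* Magic-packet wake-up is only offered when the "fsl,magic-packet"
 * device tree property was present (see fec_probe()).  From user space
 * it is armed with, e.g., "ethtool -s eth0 wol g" and disarmed with
 * "ethtool -s eth0 wol d" (eth0 being a placeholder name).
 */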
static const struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings		= fec_enet_get_settings,
	.set_settings		= fec_enet_set_settings,
	.get_drvinfo		= fec_enet_get_drvinfo,
	.get_regs_len		= fec_enet_get_regs_len,
	.get_regs		= fec_enet_get_regs,
	.nway_reset		= fec_enet_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_tunable		= fec_enet_get_tunable,
	.set_tunable		= fec_enet_set_tunable,
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (fep->bufdesc_ex) {
		if (cmd == SIOCSHWTSTAMP)
			return fec_ptp_set(ndev, rq);
		if (cmd == SIOCGHWTSTAMP)
			return fec_ptp_get(ndev, rq);
	}

	return phy_mii_ioctl(phydev, rq, cmd);
}
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			skb = rxq->rx_skbuff[i];
			rxq->rx_skbuff[i] = NULL;
			if (skb) {
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 FEC_ENET_RX_FRSIZE - fep->rx_align,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		for (i = 0; i < txq->bd.ring_size; i++) {
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;
			dev_kfree_skb(skb);
		}
	}
}
static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
			dma_free_coherent(NULL,
					  txq->bd.ring_size * TSO_HEADER_SIZE,
					  txq->tso_hdrs,
					  txq->tso_hdrs_dma);
		}

	for (i = 0; i < fep->num_rx_queues; i++)
		kfree(fep->rx_queue[i]);
	for (i = 0; i < fep->num_tx_queues; i++)
		kfree(fep->tx_queue[i]);
}
static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
		txq->bd.ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;

		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold =
			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;

		txq->tso_hdrs = dma_alloc_coherent(NULL,
					txq->bd.ring_size * TSO_HEADER_SIZE,
					&txq->tso_hdrs_dma,
					GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
	}

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
}
static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_rx_q *rxq;

	rxq = fep->rx_queue[queue];
	bdp = rxq->bd.base;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
		if (!skb)
			goto err_alloc;

		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
			dev_kfree_skb(skb);
			goto err_alloc;
		}

		rxq->rx_skbuff[i] = skb;
		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct bufdesc	*bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		bdp->cbd_sc = cpu_to_fec16(0);
		bdp->cbd_bufaddr = cpu_to_fec32(0);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		if (fec_enet_alloc_rxq_buffers(ndev, i))
			return -ENOMEM;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fec_enet_alloc_txq_buffers(ndev, i))
			return -ENOMEM;
	return 0;
}
static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(&fep->pdev->dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(&fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto clk_enable;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		goto err_enet_alloc;

	/* Init MAC prior to mii bus probe */
	fec_restart(ndev);

	/* Probe and connect to the PHY when opening the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret)
		goto err_enet_mii_probe;

	napi_enable(&fep->napi);
	phy_start(fep->phy_dev);
	netif_tx_start_all_queues(ndev);

	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
clk_enable:
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
}
static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(fep->phy_dev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(fep->phy_dev);
	fep->phy_dev = NULL;

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);

	fec_enet_free_buffers(ndev);

	return 0;
}
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320
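/* Worked example with a made-up CRC value: if the CRC32 of a multicast
 * address came out as 0xA4xxxxxx, its upper HASH_BITS are 0b101001 = 41.
 * Since 41 > 31, set_multicast_list() below sets bit (41 - 32) = 9 in
 * FEC_GRP_HASH_TABLE_HIGH; a hash of 0..31 would instead set that bit
 * number in FEC_GRP_HASH_TABLE_LOW.
 */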
static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < ndev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}
/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}

	/* Add a netif status check here to avoid a system hang in this case:
	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
	 * After ethx is down, all FEC clocks are gated off and any register
	 * access then hangs the system.
	 */
	if (!netif_running(ndev))
		return 0;

	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
	       (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
	       fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
	       fep->hwp + FEC_ADDR_HIGH);
	return 0;
}
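/* Byte-order example: for dev_addr 00:11:22:33:44:55 the two writes
 * above program FEC_ADDR_LOW = 0x00112233 and the upper half of
 * FEC_ADDR_HIGH = 0x44550000, i.e. the MAC is laid out big-endian
 * style across the two registers.
 */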
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 *
 */
static void fec_poll_controller(struct net_device *dev)
{
	int i;
	struct fec_enet_private *fep = netdev_priv(dev);

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		if (fep->irq[i] > 0) {
			disable_irq(fep->irq[i]);
			fec_enet_interrupt(fep->irq[i], dev);
			enable_irq(fep->irq[i]);
		}
	}
}
#endif
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
	}
}
static int fec_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
		fec_enet_set_netdev_features(netdev, features);
		fec_restart(netdev);
		netif_tx_wake_all_queues(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	} else {
		fec_enet_set_netdev_features(netdev, features);
	}

	return 0;
}
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};
static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};
 /*
  * XXX:  We need to clean up on failure exits here.
  *
  */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(dsize);

	WARN_ON(dsize != (1 << dsize_log2));
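	/* For reference (sizes per the bufdesc definitions in fec.h):
	 * a legacy struct bufdesc is 8 bytes (dsize_log2 = 3) and the
	 * extended struct bufdesc_ex is 32 bytes (dsize_log2 = 5), so
	 * both satisfy the power-of-two check above.
	 */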
#if defined(CONFIG_ARM)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif

	fec_enet_alloc_queue(ndev);

	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
				       GFP_KERNEL);
	if (!cbd_base)
		return -ENOMEM;

	memset(cbd_base, 0, bd_size);

	/* Get the Ethernet address */
	fec_get_mac(ndev);
	/* make sure MAC we just acquired is programmed into the hw */
	fec_set_mac_address(ndev, NULL);

	/* Set receive and transmit descriptor base. */
	for (i = 0; i < fep->num_rx_queues; i++) {
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}
	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

	fec_restart(ndev);

	return 0;
}
#ifdef CONFIG_OF
static void fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	bool active_high = false;
	u32 msec = 1;
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (!gpio_is_valid(phy_reset))
		return;

	active_high = of_property_read_bool(np, "phy-reset-active-high");

	err = devm_gpio_request_one(&pdev->dev, phy_reset,
			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
			"phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return;
	}

	msleep(msec);

	gpio_set_value_cansleep(phy_reset, !active_high);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
}
#endif /* CONFIG_OF */
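/* Illustrative device tree fragment for the reset handling above (the
 * GPIO and timing values are made up; consult the board schematics and
 * the fsl-fec binding document for real ones):
 *
 *	&fec {
 *		phy-reset-gpios = <&gpio1 2 0>;
 *		phy-reset-duration = <10>;	-- in ms, clamped to 1s above
 *		phy-reset-active-high;		-- only if reset is active high
 *	};
 */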
static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);

	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
		*num_rx = 1;
		return;
	}
}
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;
	const struct of_device_id *of_id;
	static int dev_id;
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
				  num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;
	fep->quirks = pdev->id_entry->driver_data;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	if (of_get_property(np, "fsl,magic-packet", NULL))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref))
		fep->clk_ref = NULL;

	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	fec_reset_phy(pdev);

	if (fep->bufdesc_ex)
		fec_ptp_init(pdev);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			if (i)
				break;
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	init_completion(&fep->mdio_done);
	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	fep->rx_copybreak = COPYBREAK_DEFAULT;
	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_phy:
	of_node_put(phy_node);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}
static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	of_node_put(fep->phy_node);
	free_netdev(ndev);

	return 0;
}
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(fep->phy_dev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		fec_enet_clk_enable(ndev, false);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* The SoC supplies the PHY clock and controls the PHY regulator;
	 * when either is disabled, the PHY link goes down.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			if (pdata && pdata->sleep_mode_enable)
				pdata->sleep_mode_enable(false);
			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(fep->phy_dev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}
static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}

static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	return clk_prepare_enable(fep->clk_ipg);
}
static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");