// SPDX-License-Identifier: GPL-2.0+
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME	"fec"

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE		(1 << 5)
#define FEC_ENET_RSEM_V		0x84
#define FEC_ENET_RSFL_V		16
#define FEC_ENET_RAEM_V		0x8
#define FEC_ENET_RAFL_V		0x8
#define FEC_ENET_OPD_V		0xFFF0
#define FEC_MDIO_PM_TIMEOUT	100 /* ms */

struct fec_devinfo {
	u32 quirks;
};

static const struct fec_devinfo fec_imx25_info = {
	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
		  FEC_QUIRK_HAS_FRREG,
};

static const struct fec_devinfo fec_imx27_info = {
	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
};

static const struct fec_devinfo fec_imx28_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
};

static const struct fec_devinfo fec_imx6q_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
};

static const struct fec_devinfo fec_mvf600_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
};

static const struct fec_devinfo fec_imx6x_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII,
};

static const struct fec_devinfo fec_imx6ul_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
};

static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = (kernel_ulong_t)&fec_imx25_info,
	}, {
		.name = "imx27-fec",
		.driver_data = (kernel_ulong_t)&fec_imx27_info,
	}, {
		.name = "imx28-fec",
		.driver_data = (kernel_ulong_t)&fec_imx28_info,
	}, {
		.name = "imx6q-fec",
		.driver_data = (kernel_ulong_t)&fec_imx6q_info,
	}, {
		.name = "mvf600-fec",
		.driver_data = (kernel_ulong_t)&fec_mvf600_info,
	}, {
		.name = "imx6sx-fec",
		.driver_data = (kernel_ulong_t)&fec_imx6x_info,
	}, {
		.name = "imx6ul-fec",
		.driver_data = (kernel_ulong_t)&fec_imx6ul_info,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
	IMX6SX_FEC,
	IMX6UL_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
 */
#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE		64
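
/* Editorial worked example: round_down(2048 - 64, 64) = round_down(1984, 64)
 * = 1984, because 1984 is already a multiple of 64 (31 * 64). So one
 * 2048-byte skbuf accepts at most a 1984-byte frame, whichever alignment a
 * given FEC variant actually requires.
 */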

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)

/* The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
    defined(CONFIG_ARM64)
#define OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE	0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_ST_C45		(0)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_READ_C45	(3 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_OP_ADDR_WRITE	(0)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
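
/* Illustrative composition (derived from the definitions above, not quoted
 * from a reference manual): a Clause 22 read of register 2 on PHY address 1
 * would build the management frame as
 *
 *	FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(1) | FEC_MMFR_RA(2) |
 *	FEC_MMFR_TA
 *
 * i.e. start (01), read opcode (10), the PHY and register address fields,
 * and turnaround (10); this is the same value fec_enet_mdio_read() below
 * writes to FEC_MII_DATA.
 */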

/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN		(1 << 2)
#define FEC_ECR_SLEEP		(1 << 3)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

#define COPYBREAK_DEFAULT	256

/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
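
/* Editorial note: the TSO headers live in one long-lived DMA region of
 * ring_size * TSO_HEADER_SIZE bytes starting at tso_hdrs_dma. Completion
 * paths use IS_TSO_HEADER() to recognize descriptors whose buffer address
 * falls inside that pool and skip dma_unmap_single() for them; only
 * per-packet payload mappings are torn down.
 */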

static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
}

static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}

static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}
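
/* Worked example (illustrative numbers): for the legacy 8-byte bufdesc,
 * dsize_log2 is 3. With ring_size = 512 and bd.cur ten descriptors ahead of
 * dirty_tx, the pointer difference is -10 << 3, so entries = -10 - 1 = -11
 * and the function returns -11 + 512 = 501. One slot is always held back so
 * that a completely full ring cannot be confused with an empty one.
 */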

static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}

static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	int i;
	unsigned int *src = src_buf;
	unsigned int *dst = dst_buf;

	for (i = 0; i < len; i += 4, src++, dst++)
		*dst = swab32p(src);
}
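
/* Editorial note: these helpers service FEC_QUIRK_SWAP_FRAME (set for the
 * i.MX28 in the devinfo tables above), where the controller's view of frame
 * data is byte-swapped within each 32-bit word relative to the CPU.
 * swap_buffer() converts in place for TX bounce buffers; swap_buffer2()
 * swaps while copying so the RX copybreak path only touches each word once.
 */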

static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->bd.base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
			index,
			bdp == txq->bd.cur ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_skbuff[index]);
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}
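
/* Editorial note: zeroing the IP and L4 checksum fields here pairs with the
 * BD_ENET_TX_IINS/BD_ENET_TX_PINS descriptor flags set by the submit paths
 * below for CHECKSUM_PARTIAL skbs; the controller then inserts the real
 * checksums on transmit. skb_cow_head() guarantees the headers about to be
 * written are in private, writable memory.
 */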

static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		ebdp = (struct bufdesc_ex *)bdp;

		status = fec16_to_cpu(bdp->cbd_sc);
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = cpu_to_fec32(estatus);
		}

		bufaddr = skb_frag_address(this_frag);

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
	}

	return bdp;
dma_mapping_error:
	bdp = txq->bd.cur;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
	}
	return ERR_PTR(-ENOMEM);
}

static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	last_bdp = bdp;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(bdp, &txq->bd);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp and tx_skbuff are performed before
	 * txq->bd.cur.
	 */
	wmb();
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}

static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_BUSY;
	}

	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len, total_len, data_left;
	struct bufdesc *bdp = txq->bd.cur;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;
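
	/* Editorial note on the readl() repetition below: FEC_QUIRK_ERR007885
	 * (set for the i.MX6SX and i.MX6UL in the devinfo tables above) works
	 * around an erratum where, as the code structure suggests, a trigger
	 * written to the transmit descriptor active register can be lost
	 * while the controller is busy. With the quirk set, the register is
	 * sampled up to four times and re-armed only if some sample reads
	 * back zero; without it, the trigger is written unconditionally.
	 */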

	/* Trigger transmission start */
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}

static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}

/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (bdp->cbd_bufaddr &&
			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 fec16_to_cpu(bdp->cbd_datlen),
						 DMA_TO_DEVICE);
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}

static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}

static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}

static void fec_enet_reset_skb(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	int i, j;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

		for (j = 0; j < txq->bd.ring_size; j++) {
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
			}
		}
	}
}

/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset. We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
	writel((__force u32)cpu_to_be32(temp_mac[0]),
	       fep->hwp + FEC_ADDR_LOW);
	writel((__force u32)cpu_to_be32(temp_mac[1]),
	       fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt, except MDIO. */
	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		val = readl(fep->hwp + FEC_RACC);
		/* align IP header */
		val |= FEC_RACC_SHIFT16;
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
			/* set RX checksum */
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
	}
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (ndev->phydev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame */
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     ndev->phydev && ndev->phydev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(0, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);
}

static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
{
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;

	if (stop_gpr->gpr) {
		if (enabled)
			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
					   BIT(stop_gpr->bit),
					   BIT(stop_gpr->bit));
		else
			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
					   BIT(stop_gpr->bit), 0);
	} else if (pdata && pdata->sleep_mode_enable) {
		pdata->sleep_mode_enable(enabled);
	}
}

static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset. We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(1, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	} else {
		writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);
		fec_enet_stop_mode(fep, true);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}

static void
fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_tx_wake_all_queues(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}

static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
	struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;

	fep = netdev_priv(ndev);

	txq = fep->tx_queue[queue_id];
	/* get next bdp of dirty_tx */
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		if (status & BD_ENET_TX_READY)
			break;

		index = fec_enet_get_bd_index(bdp, &txq->bd);

		skb = txq->tx_skbuff[index];
		txq->tx_skbuff[index] = NULL;
		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 fec16_to_cpu(bdp->cbd_datlen),
					 DMA_TO_DEVICE);
		bdp->cbd_bufaddr = cpu_to_fec32(0);
		if (!skb)
			goto skb_done;

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}

		/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
		 * are to time stamp the packet, so we still need to check time
		 * stamping enabled flag.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
			     fep->hwts_tx_en) &&
		    fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
skb_done:
		/* Make sure the update to bdp and tx_skbuff are performed
		 * before dirty_tx
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_tx_queue_stopped(nq)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006358: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
}

static void fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	/* Make sure that AVB queues are processed first. */
	for (i = fep->num_tx_queues - 1; i >= 0; i--)
		fec_enet_tx_queue(ndev, i);
}

static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int off;

	off = ((unsigned long)skb->data) & fep->rx_align;
	if (off)
		skb_reserve(skb, fep->rx_align + 1 - off);

	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");
		return -ENOMEM;
	}

	return 0;
}

static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *new_skb;

	if (length > fep->rx_copybreak)
		return false;

	new_skb = netdev_alloc_skb(ndev, length);
	if (!new_skb)
		return false;

	dma_sync_single_for_cpu(&fep->pdev->dev,
				fec32_to_cpu(bdp->cbd_bufaddr),
				FEC_ENET_RX_FRSIZE - fep->rx_align,
				DMA_FROM_DEVICE);
	if (!swap)
		memcpy(new_skb->data, (*skb)->data, length);
	else
		swap_buffer2(new_skb->data, (*skb)->data, length);
	*skb = new_skb;

	return true;
}
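
/* Design note (editorial): for frames no longer than rx_copybreak bytes
 * (seeded from COPYBREAK_DEFAULT, 256), copying into a small freshly
 * allocated skb and leaving the original buffer mapped on the ring needs
 * only a cache sync, whereas the non-copybreak path must unmap the full
 * FEC_ENET_RX_FRSIZE buffer and map a replacement. Small packets also tie
 * up far less skb memory this way.
 */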

/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb_new = NULL;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	u16 vlan_tag;
	int index = 0;
	bool is_copybreak;
	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->bd.cur;

	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

		/* Check for errors. */
		status ^= BD_ENET_RX_LAST;
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			   BD_ENET_RX_CL)) {
			ndev->stats.rx_errors++;
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
						| BD_ENET_RX_LAST)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(bdp, &rxq->bd);
		skb = rxq->rx_skbuff[index];

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
						  need_swap);
		if (!is_copybreak) {
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			}
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 FEC_ENET_RX_FRSIZE - fep->rx_align,
					 DMA_FROM_DEVICE);
		}

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);
		data = skb->data;

		if (!is_copybreak && need_swap)
			swap_buffer(data, pkt_len);

#if !defined(CONFIG_M5272)
		if (fep->quirks & FEC_QUIRK_HAS_RACC)
			data = skb_pull_inline(skb, 2);
#endif

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		skb_record_rx_queue(skb, queue_id);
		napi_gro_receive(&fep->napi, skb);

		if (is_copybreak) {
			dma_sync_single_for_device(&fep->pdev->dev,
						   fec32_to_cpu(bdp->cbd_bufaddr),
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
						   DMA_FROM_DEVICE);
		} else {
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);
		}

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, rxq->bd.reg_desc_active);
	}
	rxq->bd.cur = bdp;
	return pkt_received;
}

static int fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i, done = 0;

	/* Make sure that AVB queues are processed first. */
	for (i = fep->num_rx_queues - 1; i >= 0; i--)
		done += fec_enet_rx_queue(ndev, budget - done, i);

	return done;
}

static bool fec_enet_collect_events(struct fec_enet_private *fep)
{
	uint int_events;

	int_events = readl(fep->hwp + FEC_IEVENT);

	/* Don't clear MDIO events, we poll for those */
	int_events &= ~FEC_ENET_MII;

	writel(int_events, fep->hwp + FEC_IEVENT);

	return int_events != 0;
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	irqreturn_t ret = IRQ_NONE;

	if (fec_enet_collect_events(fep) && fep->link) {
		ret = IRQ_HANDLED;

		if (napi_schedule_prep(&fep->napi)) {
			/* Disable interrupts */
			writel(0, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);
		}
	}

	return ret;
}

static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int done = 0;

	do {
		done += fec_enet_rx(ndev, budget - done);
		fec_enet_tx(ndev);
	} while ((done < budget) && fec_enet_collect_events(fep));

	if (done < budget) {
		napi_complete_done(napi, done);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}

	return done;
}

/* ------------------------------------------------------------------------- */
static void fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (!IS_ERR(mac))
				iap = (unsigned char *) mac;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
			 ndev->dev_addr);
		return;
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
1736 static void fec_enet_adjust_link(struct net_device
*ndev
)
1738 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1739 struct phy_device
*phy_dev
= ndev
->phydev
;
1740 int status_change
= 0;
1743 * If the netdev is down, or is going down, we're not interested
1744 * in link state events, so just mark our idea of the link as down
1745 * and ignore the event.
1747 if (!netif_running(ndev
) || !netif_device_present(ndev
)) {
1749 } else if (phy_dev
->link
) {
1751 fep
->link
= phy_dev
->link
;
1755 if (fep
->full_duplex
!= phy_dev
->duplex
) {
1756 fep
->full_duplex
= phy_dev
->duplex
;
1760 if (phy_dev
->speed
!= fep
->speed
) {
1761 fep
->speed
= phy_dev
->speed
;
1765 /* if any of the above changed restart the FEC */
1766 if (status_change
) {
1767 napi_disable(&fep
->napi
);
1768 netif_tx_lock_bh(ndev
);
1770 netif_tx_wake_all_queues(ndev
);
1771 netif_tx_unlock_bh(ndev
);
1772 napi_enable(&fep
->napi
);
1776 napi_disable(&fep
->napi
);
1777 netif_tx_lock_bh(ndev
);
1779 netif_tx_unlock_bh(ndev
);
1780 napi_enable(&fep
->napi
);
1781 fep
->link
= phy_dev
->link
;
1787 phy_print_status(phy_dev
);

static int fec_enet_mdio_wait(struct fec_enet_private *fep)
{
	uint ievent;
	int ret;

	ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
					ievent & FEC_ENET_MII, 2, 30000);

	if (!ret)
		writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

	return ret;
}
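
/* Editorial note: the 30000 us ceiling in the poll above matches the
 * FEC_MII_TIMEOUT define earlier in this file. MDIO completion is polled
 * and acknowledged here rather than serviced from the interrupt handler,
 * which is why fec_enet_collect_events() deliberately masks FEC_ENET_MII
 * out of the events it clears.
 */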

static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	int ret = 0, frame_start, frame_addr, frame_op;
	bool is_c45 = !!(regnum & MII_ADDR_C45);

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	if (is_c45) {
		frame_start = FEC_MMFR_ST_C45;

		/* write address */
		frame_addr = (regnum >> 16);
		writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
		       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
		       FEC_MMFR_TA | (regnum & 0xFFFF),
		       fep->hwp + FEC_MII_DATA);

		/* wait for end of transfer */
		ret = fec_enet_mdio_wait(fep);
		if (ret) {
			netdev_err(fep->netdev, "MDIO address write timeout\n");
			goto out;
		}

		frame_op = FEC_MMFR_OP_READ_C45;
	} else {
		/* C22 read */
		frame_op = FEC_MMFR_OP_READ;
		frame_start = FEC_MMFR_ST;
		frame_addr = regnum;
	}

	/* start a read op */
	writel(frame_start | frame_op |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	ret = fec_enet_mdio_wait(fep);
	if (ret) {
		netdev_err(fep->netdev, "MDIO read timeout\n");
		goto out;
	}

	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			       u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	int ret, frame_start, frame_addr;
	bool is_c45 = !!(regnum & MII_ADDR_C45);

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	if (is_c45) {
		frame_start = FEC_MMFR_ST_C45;

		/* write address */
		frame_addr = (regnum >> 16);
		writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
		       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
		       FEC_MMFR_TA | (regnum & 0xFFFF),
		       fep->hwp + FEC_MII_DATA);

		/* wait for end of transfer */
		ret = fec_enet_mdio_wait(fep);
		if (ret) {
			netdev_err(fep->netdev, "MDIO address write timeout\n");
			goto out;
		}
	} else {
		/* C22 write */
		frame_start = FEC_MMFR_ST;
		frame_addr = regnum;
	}

	/* start a write op */
	writel(frame_start | FEC_MMFR_OP_WRITE |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
	       FEC_MMFR_TA | FEC_MMFR_DATA(value),
	       fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	ret = fec_enet_mdio_wait(fep);
	if (ret)
		netdev_err(fep->netdev, "MDIO write timeout\n");

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}

static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;

	if (phy_dev) {
		phy_reset_after_clk_enable(phy_dev);
	} else if (fep->phy_node) {
		/*
		 * If the PHY still is not bound to the MAC, but there is
		 * OF PHY node and a matching PHY device instance already,
		 * use the OF PHY node to obtain the PHY device instance,
		 * and then use that PHY device instance when triggering
		 * the reset.
		 */
		phy_dev = of_phy_find_device(fep->phy_node);
		phy_reset_after_clk_enable(phy_dev);
		put_device(&phy_dev->mdio.dev);
	}
}

static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_enet_out);
		if (ret)
			return ret;

		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret) {
				mutex_unlock(&fep->ptp_clk_mutex);
				goto failed_clk_ptp;
			} else {
				fep->ptp_clk_on = true;
			}
			mutex_unlock(&fep->ptp_clk_mutex);
		}

		ret = clk_prepare_enable(fep->clk_ref);
		if (ret)
			goto failed_clk_ref;

		fec_enet_phy_reset_after_clk_enable(ndev);
	} else {
		clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			clk_disable_unprepare(fep->clk_ptp);
			fep->ptp_clk_on = false;
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		clk_disable_unprepare(fep->clk_ref);
	}

	return 0;

failed_clk_ref:
	if (fep->clk_ptp) {
		mutex_lock(&fep->ptp_clk_mutex);
		clk_disable_unprepare(fep->clk_ptp);
		fep->ptp_clk_on = false;
		mutex_unlock(&fep->ptp_clk_mutex);
	}
failed_clk_ptp:
	clk_disable_unprepare(fep->clk_enet_out);

	return ret;
}

static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(ndev, fep->phy_node,
					 &fec_enet_adjust_link, 0,
					 fep->phy_interface);
		if (!phy_dev) {
			netdev_err(ndev, "Unable to connect to phy\n");
			return -ENODEV;
		}
	} else {
		/* check for attached phy */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
				continue;
			if (dev_id--)
				continue;
			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
			break;
		}

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
			phy_id = 0;
		}

		snprintf(phy_name, sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);
	}

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_set_max_speed(phy_dev, 1000);
		phy_remove_link_mode(phy_dev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
#if !defined(CONFIG_M5272)
		phy_support_sym_pause(phy_dev);
#endif
	} else {
		phy_set_max_speed(phy_dev, 100);
	}

	fep->link = 0;
	fep->full_duplex = 0;

	phy_attached_info(phy_dev);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	bool suppress_preamble = false;
	struct device_node *node;
	int err = -ENXIO;
	u32 mii_speed, holdtime;
	u32 bus_freq;

	/*
	 * The i.MX28 dual fec interfaces are not equal.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	bus_freq = 2500000; /* 2.5MHz by default */
	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
	if (node) {
		of_property_read_u32(node, "clock-frequency", &bus_freq);
		suppress_preamble = of_property_read_bool(node,
							  "suppress-preamble");
	}

	/*
	 * Set MII speed (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document.
	 */
	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
		mii_speed--;
	if (mii_speed > 63) {
		dev_err(&pdev->dev,
			"fec clock (%lu) too fast to get right mii speed\n",
			clk_get_rate(fep->clk_ipg));
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register always.
	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
	 */
	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;

	fep->phy_speed = mii_speed << 1 | holdtime << 8;
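
	/* Worked example (illustrative clock rate): with clk_ipg at 66 MHz
	 * and the default 2.5 MHz MDC, mii_speed = DIV_ROUND_UP(66000000,
	 * 5000000) = 14, minus one on ENET-MAC variants gives 13; holdtime =
	 * DIV_ROUND_UP(66000000, 100000000) - 1 = 0. phy_speed then carries
	 * the speed field shifted to bit 1 and the hold time at bit 8.
	 */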

	if (suppress_preamble)
		fep->phy_speed |= BIT(7);

	if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
		/* Clear MMFR to avoid generating an MII event when writing
		 * MSCR. MII event generation condition:
		 * - writing MSCR:
		 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
		 *	  mscr_reg_data_in[7:0] != 0
		 * - writing MMFR:
		 *	- mscr[7:0]_not_zero
		 */
		writel(0, fep->hwp + FEC_MII_DATA);
	}

	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* Clear any pending transaction complete indication */
	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, fep->dev_id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	err = of_mdiobus_register(fep->mii_bus, node);
	of_node_put(node);
	if (err)
		goto err_out_free_mdiobus;

	mii_cnt++;

	/* save fec0 mii_bus */
	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
		fec0_mii_bus = fep->mii_bus;

	return 0;

err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}

static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (--mii_cnt == 0) {
		mdiobus_unregister(fep->mii_bus);
		mdiobus_free(fep->mii_bus);
	}
}

static void fec_enet_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	strlcpy(info->driver, fep->pdev->dev.driver->name,
		sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}

static int fec_enet_get_regs_len(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct resource *r;
	int s = 0;

	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
	if (r)
		s = resource_size(r);

	return s;
}

/* List of registers that can safely be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
static __u32 fec_enet_register_version = 2;
static u32 fec_enet_register_offset[] = {
	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
	RMON_T_P_GTE2048, RMON_T_OCTETS,
	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
	RMON_R_P_GTE2048, RMON_R_OCTETS,
	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
#else
static __u32 fec_enet_register_version = 1;
static u32 fec_enet_register_offset[] = {
	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
};
#endif
static void fec_enet_get_regs(struct net_device *ndev,
			      struct ethtool_regs *regs, void *regbuf)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
	struct device *dev = &fep->pdev->dev;
	u32 *buf = (u32 *)regbuf;
	u32 i, off;
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return;

	regs->version = fec_enet_register_version;

	memset(buf, 0, regs->len);

	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
		off = fec_enet_register_offset[i];

		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
			continue;

		off >>= 2;
		buf[off] = readl(&theregs[off]);
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
static int fec_enet_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->bufdesc_ex) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
		if (fep->ptp_clock)
			info->phc_index = ptp_clock_index(fep->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
				 (1 << HWTSTAMP_TX_ON);

		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
				   (1 << HWTSTAMP_FILTER_ALL);
		return 0;
	} else {
		return ethtool_op_get_ts_info(ndev, info);
	}
}
#if !defined(CONFIG_M5272)

static void fec_enet_get_pauseparam(struct net_device *ndev,
				    struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
	pause->rx_pause = pause->tx_pause;
}
static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!ndev->phydev)
		return -ENODEV;

	if (pause->tx_pause != pause->rx_pause) {
		netdev_info(ndev,
			"hardware only supports enabling/disabling both tx and rx");
		return -EINVAL;
	}

	fep->pause_flag = 0;

	/* tx pause must be the same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
			  pause->autoneg);

	if (pause->autoneg) {
		if (netif_running(ndev))
			fec_stop(ndev);
		phy_start_aneg(ndev->phydev);
	}
	if (netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_tx_wake_all_queues(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}

	return 0;
}
static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped", RMON_T_DROP },
	{ "tx_packets", RMON_T_PACKETS },
	{ "tx_broadcast", RMON_T_BC_PKT },
	{ "tx_multicast", RMON_T_MC_PKT },
	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
	{ "tx_undersize", RMON_T_UNDERSIZE },
	{ "tx_oversize", RMON_T_OVERSIZE },
	{ "tx_fragment", RMON_T_FRAG },
	{ "tx_jabber", RMON_T_JAB },
	{ "tx_collision", RMON_T_COL },
	{ "tx_64byte", RMON_T_P64 },
	{ "tx_65to127byte", RMON_T_P65TO127 },
	{ "tx_128to255byte", RMON_T_P128TO255 },
	{ "tx_256to511byte", RMON_T_P256TO511 },
	{ "tx_512to1023byte", RMON_T_P512TO1023 },
	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
	{ "tx_octets", RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop", IEEE_T_DROP },
	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col", IEEE_T_1COL },
	{ "IEEE_tx_mcol", IEEE_T_MCOL },
	{ "IEEE_tx_def", IEEE_T_DEF },
	{ "IEEE_tx_lcol", IEEE_T_LCOL },
	{ "IEEE_tx_excol", IEEE_T_EXCOL },
	{ "IEEE_tx_macerr", IEEE_T_MACERR },
	{ "IEEE_tx_cserr", IEEE_T_CSERR },
	{ "IEEE_tx_sqe", IEEE_T_SQE },
	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets", RMON_R_PACKETS },
	{ "rx_broadcast", RMON_R_BC_PKT },
	{ "rx_multicast", RMON_R_MC_PKT },
	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
	{ "rx_undersize", RMON_R_UNDERSIZE },
	{ "rx_oversize", RMON_R_OVERSIZE },
	{ "rx_fragment", RMON_R_FRAG },
	{ "rx_jabber", RMON_R_JAB },
	{ "rx_64byte", RMON_R_P64 },
	{ "rx_65to127byte", RMON_R_P65TO127 },
	{ "rx_128to255byte", RMON_R_P128TO255 },
	{ "rx_256to511byte", RMON_R_P256TO511 },
	{ "rx_512to1023byte", RMON_R_P512TO1023 },
	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
	{ "rx_octets", RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop", IEEE_R_DROP },
	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc", IEEE_R_CRC },
	{ "IEEE_rx_align", IEEE_R_ALIGN },
	{ "IEEE_rx_macerr", IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};

#define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
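/* FEC_STATS_SIZE is also what fec_probe() adds to the alloc_etherdev_mqs()
 * allocation, so the u64 snapshot area filled in below lives directly behind
 * struct fec_enet_private.
 */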
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
}
static void fec_enet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (netif_running(dev))
		fec_enet_update_ethtool_stats(dev);

	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
}
static void fec_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       fec_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(fec_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Disable MIB statistics counters */
	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		writel(0, fep->hwp + fec_stats[i].offset);

	/* Re-enable MIB statistics counters */
	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
}
#else	/* !defined(CONFIG_M5272) */
#define FEC_STATS_SIZE	0
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}

static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
}
#endif /* !defined(CONFIG_M5272) */
/* ITR clock source is enet system clock (clk_ahb).
 * TCTT unit is cycle_ns * 64 cycle
 * So, the ICTT value = X us / (cycle_ns * 64)
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	return us * (fep->itr_clk_rate / 64000) / 1000;
}
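/* Example: if clk_ahb ran at exactly 64 MHz (a hypothetical round figure),
 * one ITR tick would be 64 cycles * (1 / 64000000) s = 1 us, so the result
 * would simply equal 'us': 64000000 / 64000 = 1000, and us * 1000 / 1000 = us.
 */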
/* Set threshold for interrupt coalescing */
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int rx_itr, tx_itr;

	/* Must be greater than zero to avoid unpredictable behavior */
	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
	    !fep->tx_time_itr || !fep->tx_pkts_itr)
		return;

	/* Select enet system clock as Interrupt Coalescing
	 * timer Clock Source
	 */
	rx_itr = FEC_ITR_CLK_SEL;
	tx_itr = FEC_ITR_CLK_SEL;

	/* set ICFT and ICTT */
	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));

	rx_itr |= FEC_ITR_EN;
	tx_itr |= FEC_ITR_EN;

	writel(tx_itr, fep->hwp + FEC_TXIC0);
	writel(rx_itr, fep->hwp + FEC_RXIC0);
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(tx_itr, fep->hwp + FEC_TXIC1);
		writel(rx_itr, fep->hwp + FEC_RXIC1);
		writel(tx_itr, fep->hwp + FEC_TXIC2);
		writel(rx_itr, fep->hwp + FEC_RXIC2);
	}
}
static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

	ec->rx_coalesce_usecs = fep->rx_time_itr;
	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;

	ec->tx_coalesce_usecs = fep->tx_time_itr;
	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;

	return 0;
}
static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device *dev = &fep->pdev->dev;
	unsigned int cycle;

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (ec->rx_max_coalesced_frames > 255) {
		dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
		dev_err(dev, "Tx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		dev_err(dev, "Rx coalesced usecs exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		dev_err(dev, "Tx coalesced usecs exceed hardware limitation\n");
		return -EINVAL;
	}

	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}
static void fec_enet_itr_coal_init(struct net_device *ndev)
{
	struct ethtool_coalesce ec;

	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	fec_enet_set_coalesce(ndev, &ec);
}
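/* The same parameters can later be tuned from user space, e.g. (sketch):
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-usecs 100 tx-frames 32
 * subject to the 255-frame and 0xFFFF-cycle limits enforced in
 * fec_enet_set_coalesce() above.
 */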
static int fec_enet_get_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = fep->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fec_enet_set_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				const void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		fep->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}
static int
fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
	if (device_may_wakeup(&ndev->dev)) {
		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
		if (fep->irq[0] > 0)
			enable_irq_wake(fep->irq[0]);
	} else {
		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
		if (fep->irq[0] > 0)
			disable_irq_wake(fep->irq[0]);
	}

	return 0;
}
static const struct ethtool_ops fec_enet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo		= fec_enet_get_drvinfo,
	.get_regs_len		= fec_enet_get_regs_len,
	.get_regs		= fec_enet_get_regs,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_tunable		= fec_enet_get_tunable,
	.set_tunable		= fec_enet_set_tunable,
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (fep->bufdesc_ex) {
		bool use_fec_hwts = !phy_has_hwtstamp(phydev);

		if (cmd == SIOCSHWTSTAMP) {
			if (use_fec_hwts)
				return fec_ptp_set(ndev, rq);
			fec_ptp_disable_hwts(ndev);
		} else if (cmd == SIOCGHWTSTAMP) {
			if (use_fec_hwts)
				return fec_ptp_get(ndev, rq);
		}
	}

	return phy_mii_ioctl(phydev, rq, cmd);
}
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			skb = rxq->rx_skbuff[i];
			rxq->rx_skbuff[i] = NULL;
			if (skb) {
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 FEC_ENET_RX_FRSIZE - fep->rx_align,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		for (i = 0; i < txq->bd.ring_size; i++) {
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;
			dev_kfree_skb(skb);
		}
	}
}
static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
			dma_free_coherent(&fep->pdev->dev,
					  txq->bd.ring_size * TSO_HEADER_SIZE,
					  txq->tso_hdrs,
					  txq->tso_hdrs_dma);
		}

	for (i = 0; i < fep->num_rx_queues; i++)
		kfree(fep->rx_queue[i]);
	for (i = 0; i < fep->num_tx_queues; i++)
		kfree(fep->tx_queue[i]);
}
static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
		txq->bd.ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;

		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold =
			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;

		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
					txq->bd.ring_size * TSO_HEADER_SIZE,
					&txq->tso_hdrs_dma,
					GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
	}

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
}
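/* Illustration (assuming the default TX_RING_SIZE of 512 and, say, a stop
 * threshold of 36 descriptors): the queue is stopped once fewer than 36 free
 * descriptors remain and only woken again when (512 - 36) / 2 = 238 are free;
 * the hysteresis avoids rapid stop/wake flapping under load.
 */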
static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_rx_q *rxq;

	rxq = fep->rx_queue[queue];
	bdp = rxq->bd.base;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
		if (!skb)
			goto err_alloc;

		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
			dev_kfree_skb(skb);
			goto err_alloc;
		}

		rxq->rx_skbuff[i] = skb;
		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct bufdesc	*bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		bdp->cbd_sc = cpu_to_fec16(0);
		bdp->cbd_bufaddr = cpu_to_fec32(0);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		if (fec_enet_alloc_rxq_buffers(ndev, i))
			return -ENOMEM;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fec_enet_alloc_txq_buffers(ndev, i))
			return -ENOMEM;
	return 0;
}
static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;
	bool reset_again;

	ret = pm_runtime_resume_and_get(&fep->pdev->dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(&fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto clk_enable;

	/* During the first fec_enet_open call the PHY isn't probed at this
	 * point. Therefore the phy_reset_after_clk_enable() call within
	 * fec_enet_clk_enable() fails. As we need this reset in order to be
	 * sure the PHY is working correctly we check if we need to reset again
	 * later when the PHY is probed
	 */
	if (ndev->phydev && ndev->phydev->drv)
		reset_again = false;
	else
		reset_again = true;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		goto err_enet_alloc;

	/* Init MAC prior to mii bus probe */
	fec_restart(ndev);

	/* Call phy_reset_after_clk_enable() again if it failed during
	 * phy_reset_after_clk_enable() before because the PHY wasn't probed.
	 */
	if (reset_again)
		fec_enet_phy_reset_after_clk_enable(ndev);

	/* Probe and connect to PHY when opening the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret)
		goto err_enet_mii_probe;

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_used();

	napi_enable(&fep->napi);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(ndev);

	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
clk_enable:
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
}
static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(ndev->phydev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(ndev->phydev);

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_unused();

	fec_enet_update_ethtool_stats(ndev);

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);

	fec_enet_free_buffers(ndev);

	return 0;
}
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define FEC_HASH_BITS	6		/* #bits in hash */
static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int crc, tmp;
	unsigned char hash;
	unsigned int hash_high = 0, hash_low = 0;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Add the addresses in hash register */
	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = ether_crc_le(ndev->addr_len, ha->addr);

		/* only the upper 6 bits (FEC_HASH_BITS) are used,
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;

		if (hash > 31)
			hash_high |= 1 << (hash - 32);
		else
			hash_low |= 1 << hash;
	}

	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}
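/* Example with a hypothetical crc value: if ether_crc_le() returned
 * 0xD4000000, the top FEC_HASH_BITS bits give hash = 0xD4000000 >> 26 = 53,
 * so bit (53 - 32) = 21 of GRP_HASH_TABLE_HIGH would be set; hash values
 * below 32 land in GRP_HASH_TABLE_LOW instead.
 */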
/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}

	/* Add netif status check here to avoid system hang in below case:
	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
	 * After ethx is down, all fec clocks are gated off and any further
	 * register access then hangs the system.
	 */
	if (!netif_running(ndev))
		return 0;

	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
	       (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
	       fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
	       fep->hwp + FEC_ADDR_HIGH);
	return 0;
}
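/* Byte-order illustration for a hypothetical MAC of 00:04:9f:01:02:03:
 * FEC_ADDR_LOW  = 0x00049f01 (bytes 0..3, most significant byte first),
 * FEC_ADDR_HIGH = 0x02030000 (bytes 4..5 in the top half-word).
 */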
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non-interrupt mode
 *
 */
static void fec_poll_controller(struct net_device *dev)
{
	int i;
	struct fec_enet_private *fep = netdev_priv(dev);

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		if (fep->irq[i] > 0) {
			disable_irq(fep->irq[i]);
			fec_enet_interrupt(fep->irq[i], dev);
			enable_irq(fep->irq[i]);
		}
	}
}
#endif
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
						netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
	}
}
static int fec_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
		fec_enet_set_netdev_features(netdev, features);
		fec_restart(netdev);
		netif_tx_wake_all_queues(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	} else {
		fec_enet_set_netdev_features(netdev, features);
	}

	return 0;
}
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};

static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};
 /*
  * XXX:  We need to clean up on failure exits here.
  *
  */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(dsize);
	int ret;

	WARN_ON(dsize != (1 << dsize_log2));
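	/* Both descriptor layouts are powers of two (the legacy bufdesc is
	 * 8 bytes, the extended bufdesc_ex 32), so dsize_log2 comes out as
	 * 3 or 5 and ring indexing can use shifts instead of divisions.
	 */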
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif

	/* Check mask of the streaming and coherent API */
	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
	if (ret < 0) {
		dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
		return ret;
	}

	fec_enet_alloc_queue(ndev);

	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
				       GFP_KERNEL);
	if (!cbd_base)
		return -ENOMEM;

	/* Get the Ethernet address */
	fec_get_mac(ndev);
	/* make sure MAC we just acquired is programmed into the hw */
	fec_set_mac_address(ndev, NULL);

	/* Set receive and transmit descriptor base. */
	for (i = 0; i < fep->num_rx_queues; i++) {
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.qid = i;
		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.qid = i;
		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}


	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

	fec_restart(ndev);

	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
		fec_enet_clear_ethtool_stats(ndev);
	else
		fec_enet_update_ethtool_stats(ndev);

	return 0;
}
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	bool active_high = false;
	int msec = 1, phy_post_delay = 0;
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return 0;

	err = of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (!err && msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (phy_reset == -EPROBE_DEFER)
		return phy_reset;
	else if (!gpio_is_valid(phy_reset))
		return 0;

	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
	/* A valid post-reset delay should be less than 1s */
	if (!err && phy_post_delay > 1000)
		return -EINVAL;

	active_high = of_property_read_bool(np, "phy-reset-active-high");

	err = devm_gpio_request_one(&pdev->dev, phy_reset,
			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
			"phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return err;
	}

	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);

	gpio_set_value_cansleep(phy_reset, !active_high);

	if (!phy_post_delay)
		return 0;

	if (phy_post_delay > 20)
		msleep(phy_post_delay);
	else
		usleep_range(phy_post_delay * 1000,
			     phy_post_delay * 1000 + 1000);

	return 0;
}
#else /* CONFIG_OF */
static int fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In the platform-probe case, the reset has already been done
	 * by machine code.
	 */
	return 0;
}
#endif /* CONFIG_OF */
static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);

	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
		*num_rx = 1;
		return;
	}
}
static int fec_enet_get_irq_cnt(struct platform_device *pdev)
{
	int irq_cnt = platform_irq_count(pdev);

	if (irq_cnt > FEC_IRQ_NUM)
		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
	else if (irq_cnt == 2)
		irq_cnt = 1;	/* last for pps */
	else if (irq_cnt <= 0)
		irq_cnt = 1;	/* At least 1 irq is needed */
	return irq_cnt;
}
static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
				   struct device_node *np)
{
	struct device_node *gpr_np;
	u32 out_val[3];
	int ret = 0;

	gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
	if (!gpr_np)
		return 0;

	ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
					 ARRAY_SIZE(out_val));
	if (ret) {
		dev_dbg(&fep->pdev->dev, "no stop mode property\n");
		goto out;
	}

	fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
	if (IS_ERR(fep->stop_gpr.gpr)) {
		dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
		ret = PTR_ERR(fep->stop_gpr.gpr);
		fep->stop_gpr.gpr = NULL;
		goto out;
	}

	fep->stop_gpr.reg = out_val[1];
	fep->stop_gpr.bit = out_val[2];

out:
	of_node_put(gpr_np);

	return ret;
}
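/* The "fsl,stop-mode" property is expected as <&gpr reg bit>; a hypothetical
 * i.MX6-style entry would be:
 *	fsl,stop-mode = <&gpr 0x34 27>;
 * where out_val[1] (0x34) is the GPR register offset and out_val[2] (27) is
 * the stop-request bit within it.
 */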
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	phy_interface_t interface;
	struct net_device *ndev;
	int i, irq, ret = 0;
	const struct of_device_id *of_id;
	static int dev_id;
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;
	char irq_name[8];
	int irq_cnt;
	struct fec_devinfo *dev_info;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;
	dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
	if (dev_info)
		fep->quirks = dev_info->quirks;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	fep->hwp = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	if ((of_machine_is_compatible("fsl,imx6q") ||
	     of_machine_is_compatible("fsl,imx6dl")) &&
	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
		fep->quirks |= FEC_QUIRK_ERR006687;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	ret = fec_enet_init_stop_mode(fep, np);
	if (ret)
		goto failed_stop_mode;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(pdev->dev.of_node, &interface);
	if (ret) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = interface;
	}

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref))
		fep->clk_ref = NULL;

	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;
	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		goto failed_clk_ahb;

	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto failed_regulator;
		}
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;

	irq_cnt = fec_enet_get_irq_cnt(pdev);
	if (fep->bufdesc_ex)
		fec_ptp_init(pdev, irq_cnt);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < irq_cnt; i++) {
		snprintf(irq_name, sizeof(irq_name), "int%d", i);
		irq = platform_get_irq_byname_optional(pdev, irq_name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	fep->rx_copybreak = COPYBREAK_DEFAULT;
	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
failed_reset:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(phy_node);
failed_stop_mode:
failed_phy:
	dev_id--;
failed_ioremap:
	free_netdev(ndev);

	return ret;
}
static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;
	int ret;

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);

	clk_disable_unprepare(fep->clk_ahb);
	clk_disable_unprepare(fep->clk_ipg);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	free_netdev(ndev);
	return 0;
}
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		fec_enet_clk_enable(ndev, false);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
		else
			fec_enet_stop_mode(fep, true);
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* The SoC supplies the PHY clock and controls the PHY regulator;
	 * if either the clock or the regulator is disabled, the PHY link
	 * goes down, so report it as such.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			fec_enet_stop_mode(fep, false);

			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}
static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ahb);
	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}

static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		return ret;
	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	return 0;

failed_clk_ipg:
	clk_disable_unprepare(fep->clk_ahb);
	return ret;
}
static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
		.suppress_bind_attrs = true,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");