// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2019 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/reset.h>
#include <linux/math64.h>

#include <net/page_pool/helpers.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	  | \
		 NETIF_MSG_TIMER  | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)
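/* Read-modify-write helper: clears the bits in @clear and sets the bits
 * in @set of register @reg in one update.
 */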
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
		 u32 set)
{
	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}
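/* Poll register @reg until the bits selected by @mask read back as
 * @value; gives up after a bounded number of iterations.
 */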
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
	unsigned int i;

	for (i = 0; i < 10000; i++) {
		if ((ravb_read(ndev, reg) & mask) == value)
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}
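/* Switch the AVB-DMAC operating mode and wait for CSR.OPS to confirm
 * that the requested mode has been entered.
 */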
static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
{
	u32 csr_ops = 1U << (opmode & CCC_OPC);
	u32 ccc_mask = CCC_OPC;
	int error;

	/* If gPTP active in config mode is supported it needs to be configured
	 * along with CSEL and operating mode in the same access. This is a
	 * hardware limitation.
	 */
	if (opmode & CCC_GAC)
		ccc_mask |= CCC_GAC | CCC_CSEL;

	/* Set operating mode */
	ravb_modify(ndev, CCC, ccc_mask, opmode);
	/* Check if the operating mode is changed to the requested one */
	error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
	if (error)
		netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
			   opmode & CCC_OPC);

	return error;
}
static void ravb_set_rate_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 10:	/* 10BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
		break;
	case 100:	/* 100BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
		break;
	case 1000:	/* 1000BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
		break;
	}
}
static void ravb_set_rate_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 100:	/* 100BASE */
		ravb_write(ndev, GECMR_SPEED_100, GECMR);
		break;
	case 1000:	/* 1000BASE */
		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
		break;
	}
}
/* Get MAC address from the MAC address registers
 *
 * The Ethernet AVB device doesn't have a ROM for the MAC address.
 * This function gets the MAC address that was set up by a bootloader.
 */
static void ravb_read_mac_address(struct device_node *np,
				  struct net_device *ndev)
{
	int ret;

	ret = of_get_ethdev_address(np, ndev);
	if (ret) {
		u32 mahr = ravb_read(ndev, MAHR);
		u32 malr = ravb_read(ndev, MALR);
		u8 addr[ETH_ALEN];

		addr[0] = (mahr >> 24) & 0xFF;
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >>  8) & 0xFF;
		addr[3] = (mahr >>  0) & 0xFF;
		addr[4] = (malr >>  8) & 0xFF;
		addr[5] = (malr >>  0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}
static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}

/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static const struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = ravb_set_mdc,
	.set_mdio_dir = ravb_set_mdio_dir,
	.set_mdio_data = ravb_set_mdio_data,
	.get_mdio_data = ravb_get_mdio_data,
};
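/* RX descriptors are addressed through the raw byte view of the ring
 * because the descriptor size differs between hardware variants.
 */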
static struct ravb_rx_desc *
ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
		 unsigned int i)
{
	return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
}
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats[q];
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct ravb_tx_desc *desc;
	unsigned int entry;
	int free_num = 0;
	u32 size;

	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
		bool txed;

		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
					     num_tx_desc);
		desc = &priv->tx_ring[q][entry];
		txed = desc->die_dt == DT_FEMPTY;
		if (free_txed_only && !txed)
			break;
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
		/* Free the original skb. */
		if (priv->tx_skb[q][entry / num_tx_desc]) {
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			/* Last packet descriptor? */
			if (entry % num_tx_desc == num_tx_desc - 1) {
				entry /= num_tx_desc;
				dev_kfree_skb_any(priv->tx_skb[q][entry]);
				priv->tx_skb[q][entry] = NULL;
				if (txed)
					stats->tx_packets++;
			}
			free_num++;
		}
		if (txed)
			stats->tx_bytes += size;
		desc->die_dt = DT_EEMPTY;
	}

	return free_num;
}
static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	if (!priv->rx_ring[q].raw)
		return;

	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
			  priv->rx_desc_dma[q]);
	priv->rx_ring[q].raw = NULL;
}
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int num_tx_desc = priv->num_tx_desc;
	unsigned int ring_size;
	unsigned int i;

	ravb_rx_ring_free(ndev, q);

	if (priv->tx_ring[q]) {
		ravb_tx_free(ndev, q, false);

		ring_size = sizeof(struct ravb_tx_desc) *
			    (priv->num_tx_ring[q] * num_tx_desc + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
				  priv->tx_desc_dma[q]);
		priv->tx_ring[q] = NULL;
	}

	/* Free RX buffers */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		if (priv->rx_buffers[q][i].page)
			page_pool_put_page(priv->rx_pool[q],
					   priv->rx_buffers[q][i].page,
					   0, true);
	}
	kfree(priv->rx_buffers[q]);
	priv->rx_buffers[q] = NULL;
	page_pool_destroy(priv->rx_pool[q]);

	/* Free aligned TX buffers */
	kfree(priv->tx_align[q]);
	priv->tx_align[q] = NULL;

	/* Free TX skb ringbuffer.
	 * SKBs are freed by ravb_tx_free() call above.
	 */
	kfree(priv->tx_skb[q]);
	priv->tx_skb[q] = NULL;
}
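/* Attach a fresh page-pool buffer to @rx_desc. On allocation failure the
 * descriptor data size is set to 0 so the hardware will not DMA into it.
 */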
static int
ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask,
		     struct ravb_rx_desc *rx_desc)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct ravb_rx_buffer *rx_buff;
	dma_addr_t dma_addr;
	unsigned int size;

	rx_buff = &priv->rx_buffers[q][entry];
	size = info->rx_buffer_size;
	rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset,
					&size, gfp_mask);
	if (unlikely(!rx_buff->page)) {
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		rx_desc->ds_cc = cpu_to_le16(0);
		return -ENOMEM;
	}

	dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset;
	dma_sync_single_for_device(ndev->dev.parent, dma_addr,
				   info->rx_buffer_size, DMA_FROM_DEVICE);
	rx_desc->dptr = cpu_to_le32(dma_addr);

	/* The end of the RX buffer is used to store skb shared data, so we need
	 * to ensure that the hardware leaves enough space for this.
	 */
	rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size -
				     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -
				     ETH_FCS_LEN + sizeof(__sum16));

	return 0;
}
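/* Refill up to @count RX descriptors starting at dirty_rx[q] and return
 * the number of descriptors actually made available to the hardware.
 */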
static u32
ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_rx_desc *rx_desc;
	u32 i, entry;

	for (i = 0; i < count; i++) {
		entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q];
		rx_desc = ravb_rx_get_desc(priv, q, entry);

		if (!priv->rx_buffers[q][entry].page) {
			if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry,
							  gfp_mask, rx_desc)))
				break;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		rx_desc->die_dt = DT_FEMPTY;
	}

	return i;
}
/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct ravb_rx_desc *rx_desc;
	struct ravb_tx_desc *tx_desc;
	struct ravb_desc *desc;
	unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
				    num_tx_desc;
	unsigned int i;

	priv->cur_rx[q] = 0;
	priv->cur_tx[q] = 0;
	priv->dirty_rx[q] = 0;
	priv->dirty_tx[q] = 0;

	/* Regular RX descriptors have already been initialized by
	 * ravb_rx_ring_refill(), we just need to initialize the final link
	 * descriptor.
	 */
	rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]);
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */

	memset(priv->tx_ring[q], 0, tx_ring_size);
	/* Build TX ring buffer */
	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
	     i++, tx_desc++) {
		tx_desc->die_dt = DT_EEMPTY;
		if (num_tx_desc > 1) {
			tx_desc++;
			tx_desc->die_dt = DT_EEMPTY;
		}
	}
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
	tx_desc->die_dt = DT_LINKFIX; /* type */

	/* RX descriptor base address for best effort */
	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

	/* TX descriptor base address for best effort */
	desc = &priv->desc_bat[q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);

	priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
						  &priv->rx_desc_dma[q],
						  GFP_KERNEL);

	return priv->rx_ring[q].raw;
}
/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct page_pool_params params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP,
		.pool_size = priv->num_rx_ring[q],
		.nid = NUMA_NO_NODE,
		.dev = ndev->dev.parent,
		.dma_dir = DMA_FROM_DEVICE,
	};
	unsigned int ring_size;
	u32 num_filled;

	/* Allocate RX page pool and buffers */
	priv->rx_pool[q] = page_pool_create(&params);
	if (IS_ERR(priv->rx_pool[q]))
		goto error;

	/* Allocate RX buffers */
	priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q],
				      sizeof(*priv->rx_buffers[q]), GFP_KERNEL);
	if (!priv->rx_buffers[q])
		goto error;

	/* Allocate TX skb rings */
	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
	if (!priv->tx_skb[q])
		goto error;

	/* Allocate all RX descriptors. */
	if (!ravb_alloc_rx_desc(ndev, q))
		goto error;

	/* Populate RX ring buffer. */
	priv->dirty_rx[q] = 0;
	ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
	memset(priv->rx_ring[q].raw, 0, ring_size);
	num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q],
					 GFP_KERNEL);
	if (num_filled != priv->num_rx_ring[q])
		goto error;

	if (num_tx_desc > 1) {
		/* Allocate rings for the aligned buffers */
		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
					    DPTR_ALIGN - 1, GFP_KERNEL);
		if (!priv->tx_align[q])
			goto error;
	}

	/* Allocate all TX descriptors. */
	ring_size = sizeof(struct ravb_tx_desc) *
		    (priv->num_tx_ring[q] * num_tx_desc + 1);
	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->tx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->tx_ring[q])
		goto error;

	return 0;

error:
	ravb_ring_free(ndev, q);

	return -ENOMEM;
}
static void ravb_csum_init_gbeth(struct net_device *ndev)
{
	bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
	bool rx_enable = ndev->features & NETIF_F_RXCSUM;

	if (!(tx_enable || rx_enable))
		goto done;

	ravb_write(ndev, 0, CSR0);
	if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
		netdev_err(ndev, "Timeout enabling hardware checksum\n");

		if (tx_enable)
			ndev->features &= ~NETIF_F_HW_CSUM;

		if (rx_enable)
			ndev->features &= ~NETIF_F_RXCSUM;
	} else {
		if (tx_enable)
			ravb_write(ndev, CSR1_CSUM_ENABLE, CSR1);

		if (rx_enable)
			ravb_write(ndev, CSR2_CSUM_ENABLE, CSR2);
	}

done:
	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
}
static void ravb_emac_init_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
	} else {
		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
			    CXR31_SEL_LINK0);
	}

	/* Receive frame limit set register */
	ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);

	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
			 ECMR_TE | ECMR_RE | ECMR_RCPT |
			 ECMR_TXF | ECMR_RXF, ECMR);

	ravb_set_rate_gbeth(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);

	ravb_csum_init_gbeth(ndev);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
}
static void ravb_emac_init_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	/* Set receive frame length
	 *
	 * The length set here describes the frame from the destination address
	 * up to and including the CRC data. However only the frame data,
	 * excluding the CRC, are transferred to memory. To allow for the
	 * largest frames add the CRC length to the maximum Rx descriptor size.
	 */
	ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);

	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
		   ECMR_TE | ECMR_RE, ECMR);

	ravb_set_rate_rcar(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev,
		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}
static void ravb_emac_init_rcar_gen4(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII;

	ravb_modify(ndev, APSR, APSR_MIISELECT, mii ? APSR_MIISELECT : 0);

	ravb_emac_init_rcar(ndev);
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	info->emac_init(ndev);
}
static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);

	ravb_write(ndev, 0x60000000, RCR);

	/* Set Max Frame Length (RTC) */
	ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);

	ravb_write(ndev, 0x00222200, TGC);

	ravb_write(ndev, 0, TCCR);

	ravb_write(ndev, RIC0_FRE0, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0x0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);

	ravb_write(ndev, TIC_FTE0, TIC);

	return 0;
}
static int ravb_dmac_init_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;
	error = ravb_ring_init(ndev, RAVB_NC);
	if (error) {
		ravb_ring_free(ndev, RAVB_BE);
		return error;
	}

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);
	ravb_ring_format(ndev, RAVB_NC);

	/* Set AVB RX */
	ravb_write(ndev,
		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);

	/* Set FIFO size */
	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);

	/* Timestamp enable */
	ravb_write(ndev, TCCR_TFEN, TCCR);

	/* Interrupt init: */
	if (info->multi_irqs) {
		/* Clear DIL.DPLx */
		ravb_write(ndev, 0, DIL);
		/* Set queue specific interrupt */
		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
	}
	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
	/* Frame transmitted, timestamp FIFO updated */
	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

	return 0;
}
/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Set CONFIG mode */
	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
	if (error)
		return error;

	error = info->dmac_init(ndev);
	if (error)
		return error;

	/* Setting the control will start the AVB-DMAC process. */
	return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
}
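/* Drain the TX timestamp FIFO and deliver each timestamp to the skb on
 * ts_skb_list whose tag matches the one reported by the hardware.
 */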
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	struct timespec64 ts;
	u16 tag, tfa_tag;
	int count;
	u32 tfa2;

	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
	while (count--) {
		tfa2 = ravb_read(ndev, TFA2);
		tfa_tag = (tfa2 & TFA2_TST) >> 16;
		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
			    ravb_read(ndev, TFA1);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
					 list) {
			skb = ts_skb->skb;
			tag = ts_skb->tag;
			list_del(&ts_skb->list);
			kfree(ts_skb);
			if (tag == tfa_tag) {
				skb_tstamp_tx(skb, &shhwtstamps);
				dev_consume_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
	}
}
static void ravb_rx_csum_gbeth(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	size_t csum_len;
	u16 *hw_csum;

	/* The hardware checksum status is contained in 4 bytes appended to
	 * packet data.
	 *
	 * For ipv4, the first 2 bytes are the ip header checksum status. We can
	 * ignore this as it will always be re-checked in inet_gro_receive().
	 *
	 * The last 2 bytes are the protocol checksum status which will be zero
	 * if the checksum has been validated.
	 */
	csum_len = sizeof(*hw_csum) * 2;
	if (unlikely(skb->len < csum_len))
		return;

	if (skb_is_nonlinear(skb)) {
		skb_frag_t *last_frag = &shinfo->frags[shinfo->nr_frags - 1];

		hw_csum = (u16 *)(skb_frag_address(last_frag) +
				  skb_frag_size(last_frag));
		skb_frag_size_sub(last_frag, csum_len);
	} else {
		hw_csum = (u16 *)skb_tail_pointer(skb);
		skb_trim(skb, skb->len - csum_len);
	}

	if (!get_unaligned(--hw_csum))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static void ravb_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
	 * appended to packet data
	 */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));
}
/* Packet receive function for Gigabit Ethernet */
static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct net_device_stats *stats;
	struct ravb_rx_desc *desc;
	struct sk_buff *skb;
	int rx_packets = 0;
	u8 desc_status;
	u16 desc_len;
	u8 die_dt;
	int entry;
	int limit;
	int i;

	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
	stats = &priv->stats[q];

	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q].desc[entry];
		if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
			break;

		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!desc_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			struct ravb_rx_buffer *rx_buff;
			void *rx_addr;

			rx_buff = &priv->rx_buffers[q][entry];
			rx_addr = page_address(rx_buff->page) + rx_buff->offset;
			die_dt = desc->die_dt & 0xF0;
			dma_sync_single_for_cpu(ndev->dev.parent,
						le32_to_cpu(desc->dptr),
						desc_len, DMA_FROM_DEVICE);

			switch (die_dt) {
			case DT_FSINGLE:
			case DT_FSTART:
				/* Start of packet: Set initial data length. */
				skb = napi_build_skb(rx_addr,
						     info->rx_buffer_size);
				if (unlikely(!skb)) {
					stats->rx_errors++;
					page_pool_put_page(priv->rx_pool[q],
							   rx_buff->page, 0,
							   true);
					break;
				}
				skb_mark_for_recycle(skb);
				skb_put(skb, desc_len);

				/* Save this skb if the packet spans multiple
				 * descriptors.
				 */
				if (die_dt == DT_FSTART)
					priv->rx_1st_skb = skb;
				break;

			case DT_FMID:
			case DT_FEND:
				/* Continuing a packet: Add this buffer as an RX
				 * frag.
				 */

				/* rx_1st_skb will be NULL if napi_build_skb()
				 * failed for the first descriptor of a
				 * multi-descriptor packet.
				 */
				if (unlikely(!priv->rx_1st_skb)) {
					stats->rx_errors++;
					page_pool_put_page(priv->rx_pool[q],
							   rx_buff->page, 0,
							   true);

					/* We may find a DT_FSINGLE or DT_FSTART
					 * descriptor in the queue which we can
					 * process, so don't give up yet.
					 */
					continue;
				}
				skb_add_rx_frag(priv->rx_1st_skb,
						skb_shinfo(priv->rx_1st_skb)->nr_frags,
						rx_buff->page, rx_buff->offset,
						desc_len, info->rx_buffer_size);

				/* Set skb to point at the whole packet so that
				 * we only need one code path for finishing a
				 * packet.
				 */
				skb = priv->rx_1st_skb;
			}

			switch (die_dt) {
			case DT_FSINGLE:
			case DT_FEND:
				/* Finishing a packet: Determine protocol &
				 * checksum, hand off to NAPI and update our
				 * stats.
				 */
				skb->protocol = eth_type_trans(skb, ndev);
				if (ndev->features & NETIF_F_RXCSUM)
					ravb_rx_csum_gbeth(skb);
				stats->rx_bytes += skb->len;
				napi_gro_receive(&priv->napi[q], skb);
				rx_packets++;

				/* Clear rx_1st_skb so that it will only be
				 * non-NULL when valid.
				 */
				priv->rx_1st_skb = NULL;
			}

			/* Mark this RX buffer as consumed. */
			rx_buff->page = NULL;
		}
	}

	/* Refill the RX ring buffers. */
	priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
						 priv->cur_rx[q] - priv->dirty_rx[q],
						 GFP_ATOMIC);

	stats->rx_packets += rx_packets;
	return rx_packets;
}
/* Packet receive function for Ethernet AVB */
static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_ex_rx_desc *desc;
	unsigned int limit, i;
	struct sk_buff *skb;
	struct timespec64 ts;
	int rx_packets = 0;
	u8 desc_status;
	u16 pkt_len;
	int entry;

	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q].ex_desc[entry];
		if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
			break;

		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
				   MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
			struct ravb_rx_buffer *rx_buff;
			void *rx_addr;

			rx_buff = &priv->rx_buffers[q][entry];
			rx_addr = page_address(rx_buff->page) + rx_buff->offset;
			dma_sync_single_for_cpu(ndev->dev.parent,
						le32_to_cpu(desc->dptr),
						pkt_len, DMA_FROM_DEVICE);

			skb = napi_build_skb(rx_addr, info->rx_buffer_size);
			if (unlikely(!skb)) {
				stats->rx_errors++;
				page_pool_put_page(priv->rx_pool[q],
						   rx_buff->page, 0, true);
				break;
			}
			skb_mark_for_recycle(skb);
			get_ts &= (q == RAVB_NC) ?
					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
			if (get_ts) {
				struct skb_shared_hwtstamps *shhwtstamps;

				shhwtstamps = skb_hwtstamps(skb);
				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
					     32) | le32_to_cpu(desc->ts_sl);
				ts.tv_nsec = le32_to_cpu(desc->ts_n);
				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
			}

			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			if (ndev->features & NETIF_F_RXCSUM)
				ravb_rx_csum(skb);
			napi_gro_receive(&priv->napi[q], skb);
			rx_packets++;
			stats->rx_bytes += pkt_len;

			/* Mark this RX buffer as consumed. */
			rx_buff->page = NULL;
		}
	}

	/* Refill the RX ring buffers. */
	priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
						 priv->cur_rx[q] - priv->dirty_rx[q],
						 GFP_ATOMIC);

	stats->rx_packets += rx_packets;
	return rx_packets;
}
/* Packet receive function for Ethernet AVB */
static int ravb_rx(struct net_device *ndev, int budget, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	return info->receive(ndev, budget, q);
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
	/* Disable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
	/* Enable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}
/* Function for waiting until the DMA process has finished */
static int ravb_stop_dma(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Wait for stopping the hardware TX process */
	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
	if (error)
		return error;

	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
			  0);
	if (error)
		return error;

	/* Stop the E-MAC's RX/TX processes. */
	ravb_rcv_snd_disable(ndev);

	/* Wait for stopping the RX DMA process */
	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
	if (error)
		return error;

	/* Stop AVB-DMAC process */
	return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}
/* E-MAC interrupt handler */
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecsr, psr;

	ecsr = ravb_read(ndev, ECSR);
	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */

	if (ecsr & ECSR_MPD)
		pm_wakeup_event(&priv->pdev->dev, 0);
	if (ecsr & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (ecsr & ECSR_LCHNG) {
		/* Link changed */
		if (priv->no_avb_link)
			return;
		psr = ravb_read(ndev, PSR);
		if (priv->avb_link_active_low)
			psr ^= PSR_LMON;
		if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
			ravb_rcv_snd_disable(ndev);
		} else {
			/* Enable RX and TX */
			ravb_rcv_snd_enable(ndev);
		}
	}
}
static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_HANDLED;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev))) {
		result = IRQ_NONE;
		goto out_rpm_put;
	}

	spin_lock(&priv->lock);
	ravb_emac_interrupt_unlocked(ndev);
	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}
/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 eis, ris2;

	eis = ravb_read(ndev, EIS);
	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
	if (eis & EIS_QFS) {
		ris2 = ravb_read(ndev, RIS2);
		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
			   RIS2);

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF0)
			priv->stats[RAVB_BE].rx_over_errors++;

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF1)
			priv->stats[RAVB_NC].rx_over_errors++;

		/* Receive FIFO Overflow int */
		if (ris2 & RIS2_RFFF)
			priv->rx_fifo_errors++;
	}
}
static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	u32 ris0 = ravb_read(ndev, RIS0);
	u32 ric0 = ravb_read(ndev, RIC0);
	u32 tis = ravb_read(ndev, TIS);
	u32 tic = ravb_read(ndev, TIC);

	if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
		if (napi_schedule_prep(&priv->napi[q])) {
			/* Mask RX and TX interrupts */
			if (!info->irq_en_dis) {
				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
				ravb_write(ndev, tic & ~BIT(q), TIC);
			} else {
				ravb_write(ndev, BIT(q), RID0);
				ravb_write(ndev, BIT(q), TID);
			}
			__napi_schedule(&priv->napi[q]);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
				    ris0, ric0);
			netdev_warn(ndev,
				    " tx status 0x%08x, tx mask 0x%08x.\n",
				    tis, tic);
		}
		return true;
	}
	return false;
}
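/* Returns true if a "timestamp FIFO updated" interrupt was pending and
 * has been handled.
 */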
static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
	u32 tis = ravb_read(ndev, TIS);

	if (tis & TIS_TFUF) {
		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
		ravb_get_tx_tstamp(ndev);
		return true;
	}
	return false;
}
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Received and transmitted interrupts */
	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
		int q;

		/* Timestamp updated */
		if (ravb_timestamp_interrupt(ndev))
			result = IRQ_HANDLED;

		/* Network control and best effort queue RX/TX */
		if (info->nc_queues) {
			for (q = RAVB_NC; q >= RAVB_BE; q--) {
				if (ravb_queue_interrupt(ndev, q))
					result = IRQ_HANDLED;
			}
		} else {
			if (ravb_queue_interrupt(ndev, RAVB_BE))
				result = IRQ_HANDLED;
		}
	}

	/* E-MAC status summary */
	if (iss & ISS_MS) {
		ravb_emac_interrupt_unlocked(ndev);
		result = IRQ_HANDLED;
	}

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}
/* Timestamp/Error/gPTP interrupt handler */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Timestamp updated */
	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
		result = IRQ_HANDLED;

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}
static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);

	/* Network control/Best effort queue RX/TX */
	if (ravb_queue_interrupt(ndev, q))
		result = IRQ_HANDLED;

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}

static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}
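/* NAPI poll handler: processes received frames, reclaims transmitted
 * descriptors and re-arms the queue interrupts when under budget.
 */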
static int ravb_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned long flags;
	int q = napi - priv->napi;
	int mask = BIT(q);
	int work_done;

	/* Processing RX Descriptor Ring */
	/* Clear RX interrupt */
	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
	work_done = ravb_rx(ndev, budget, q);

	/* Processing TX Descriptor Ring */
	spin_lock_irqsave(&priv->lock, flags);
	/* Clear TX interrupt */
	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
	ravb_tx_free(ndev, q, true);
	netif_wake_subqueue(ndev, q);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Receive error message handling */
	priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
	if (info->nc_queues)
		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
		ndev->stats.rx_over_errors = priv->rx_over_errors;
	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* Re-enable RX/TX interrupts */
		spin_lock_irqsave(&priv->lock, flags);
		if (!info->irq_en_dis) {
			ravb_modify(ndev, RIC0, mask, mask);
			ravb_modify(ndev, TIC, mask, mask);
		} else {
			ravb_write(ndev, mask, RIE0);
			ravb_write(ndev, mask, TIE);
		}
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return work_done;
}
static void ravb_set_duplex_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
}

/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable TX and RX right over here, if E-MAC change is ignored */
	if (priv->no_avb_link)
		ravb_rcv_snd_disable(ndev);

	if (phydev->link) {
		if (info->half_duplex && phydev->duplex != priv->duplex) {
			new_state = true;
			priv->duplex = phydev->duplex;
			ravb_set_duplex_gbeth(ndev);
		}

		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
			info->set_rate(ndev);
		}
		if (!priv->link) {
			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
			new_state = true;
			priv->link = phydev->link;
		}
	} else if (priv->link) {
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
		if (info->half_duplex)
			priv->duplex = -1;
	}

	/* Enable TX and RX right over here, if E-MAC change is ignored */
	if (priv->no_avb_link && phydev->link)
		ravb_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}
/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct phy_device *phydev;
	struct device_node *pn;
	phy_interface_t iface;
	int err;

	priv->link = 0;
	priv->speed = 0;
	priv->duplex = -1;

	/* Try connecting to PHY */
	pn = of_parse_phandle(np, "phy-handle", 0);
	if (!pn) {
		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		if (of_phy_is_fixed_link(np)) {
			err = of_phy_register_fixed_link(np);
			if (err)
				return err;
		}
		pn = of_node_get(np);
	}

	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
				     : priv->phy_interface;
	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
	of_node_put(pn);
	if (!phydev) {
		netdev_err(ndev, "failed to connect PHY\n");
		err = -ENOENT;
		goto err_deregister_fixed_link;
	}

	if (!info->half_duplex) {
		/* 10BASE, Pause and Asym Pause is not supported */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

		/* Half Duplex is not supported */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	phy_attached_info(phydev);

	return 0;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	return err;
}

/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
	int error;

	error = ravb_phy_init(ndev);
	if (error)
		return error;

	phy_start(ndev->phydev);

	return 0;
}
static u32 ravb_get_msglevel(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ravb_private *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}
static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_csum_offload_errors",
	"rx_queue_0_over_errors",
};

static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_missed_errors",
	"rx_queue_0_over_errors",

	"rx_queue_1_current",
	"tx_queue_1_current",
	"rx_queue_1_dirty",
	"tx_queue_1_dirty",
	"rx_queue_1_packets",
	"tx_queue_1_packets",
	"rx_queue_1_bytes",
	"tx_queue_1_bytes",
	"rx_queue_1_mcast_packets",
	"rx_queue_1_errors",
	"rx_queue_1_crc_errors",
	"rx_queue_1_frame_errors",
	"rx_queue_1_length_errors",
	"rx_queue_1_missed_errors",
	"rx_queue_1_over_errors",
};
static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
	struct ravb_private *priv = netdev_priv(netdev);
	const struct ravb_hw_info *info = priv->info;

	switch (sset) {
	case ETH_SS_STATS:
		return info->stats_len;
	default:
		return -EOPNOTSUPP;
	}
}
static void ravb_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *estats, u64 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int num_rx_q;
	int i = 0;
	int q;

	num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
	/* Device-specific stats */
	for (q = RAVB_BE; q < num_rx_q; q++) {
		struct net_device_stats *stats = &priv->stats[q];

		data[i++] = priv->cur_rx[q];
		data[i++] = priv->cur_tx[q];
		data[i++] = priv->dirty_rx[q];
		data[i++] = priv->dirty_tx[q];
		data[i++] = stats->rx_packets;
		data[i++] = stats->tx_packets;
		data[i++] = stats->rx_bytes;
		data[i++] = stats->tx_bytes;
		data[i++] = stats->multicast;
		data[i++] = stats->rx_errors;
		data[i++] = stats->rx_crc_errors;
		data[i++] = stats->rx_frame_errors;
		data[i++] = stats->rx_length_errors;
		data[i++] = stats->rx_missed_errors;
		data[i++] = stats->rx_over_errors;
	}
}
static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, info->gstrings_stats, info->gstrings_size);
		break;
	}
}
static void ravb_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ring->rx_max_pending = BE_RX_RING_MAX;
	ring->tx_max_pending = BE_TX_RING_MAX;
	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}
static int ravb_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	if (ring->tx_pending > BE_TX_RING_MAX ||
	    ring->rx_pending > BE_RX_RING_MAX ||
	    ring->tx_pending < BE_TX_RING_MIN ||
	    ring->rx_pending < BE_RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		/* Stop PTP Clock driver */
		if (info->gptp)
			ravb_ptp_stop(ndev);
		/* Wait for DMA stopping */
		error = ravb_stop_dma(ndev);
		if (error) {
			netdev_err(ndev,
				   "cannot set ringparam! Any AVB processes are still running?\n");
			return error;
		}
		synchronize_irq(ndev->irq);

		/* Free all the skb's in the RX queue and the DMA buffers. */
		ravb_ring_free(ndev, RAVB_BE);
		if (info->nc_queues)
			ravb_ring_free(ndev, RAVB_NC);
	}

	/* Set new parameters */
	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

	if (netif_running(ndev)) {
		error = ravb_dmac_init(ndev);
		if (error) {
			netdev_err(ndev,
				   "%s: ravb_dmac_init() failed, error %d\n",
				   __func__, error);
			return error;
		}

		ravb_emac_init(ndev);

		/* Initialise PTP Clock driver */
		if (info->gptp)
			ravb_ptp_init(ndev, priv->pdev);

		netif_device_attach(ndev);
	}

	return 0;
}
static int ravb_get_ts_info(struct net_device *ndev,
			    struct kernel_ethtool_ts_info *info)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *hw_info = priv->info;

	if (hw_info->gptp || hw_info->ccc_gac) {
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;
		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			(1 << HWTSTAMP_FILTER_ALL);
		info->phc_index = ptp_clock_index(priv->ptp.clock);
	}

	return 0;
}
static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ravb_private *priv = netdev_priv(ndev);

	wol->supported = WAKE_MAGIC;
	wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
}

static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);

	device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);

	return 0;
}
static const struct ethtool_ops ravb_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_msglevel		= ravb_get_msglevel,
	.set_msglevel		= ravb_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ravb_get_strings,
	.get_ethtool_stats	= ravb_get_ethtool_stats,
	.get_sset_count		= ravb_get_sset_count,
	.get_ringparam		= ravb_get_ringparam,
	.set_ringparam		= ravb_set_ringparam,
	.get_ts_info		= ravb_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_wol		= ravb_get_wol,
	.set_wol		= ravb_set_wol,
};
static int ravb_set_config_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	if (info->gptp) {
		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
		if (error)
			return error;
		/* Set CSEL value */
		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
	} else if (info->ccc_gac) {
		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
	} else {
		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
	}

	return error;
}
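/* Program the previously computed gPTP timer increment into GTI and
 * request that the hardware load it.
 */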
static void ravb_set_gti(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (!(info->gptp || info->ccc_gac))
		return;

	ravb_write(ndev, priv->gti_tiv, GTI);

	/* Request GTI loading */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
}
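/* GTI.TIV holds the timer increment as a fixed-point value with 20
 * fractional bits, i.e. nanoseconds per gPTP clock cycle scaled by 2^20.
 */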
static int ravb_compute_gti(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = ndev->dev.parent;
	unsigned long rate;
	u64 inc;

	if (!(info->gptp || info->ccc_gac))
		return 0;

	if (info->gptp_ref_clk)
		rate = clk_get_rate(priv->gptp_clk);
	else
		rate = clk_get_rate(priv->clk);
	if (!rate)
		return -EINVAL;

	inc = div64_ul(1000000000ULL << 20, rate);

	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
			inc, GTI_TIV_MIN, GTI_TIV_MAX);
		return -EINVAL;
	}
	priv->gti_tiv = inc;

	return 0;
}
/* Set tx and rx clock internal delay modes */
static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	bool explicit_delay = false;
	u32 delay;

	if (!priv->info->internal_delay)
		return;

	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 1800, according to DT bindings */
		priv->rxcidm = !!delay;
		explicit_delay = true;
	}

	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 2000, according to DT bindings */
		priv->txcidm = !!delay;
		explicit_delay = true;
	}

	if (explicit_delay)
		return;

	/* Fall back to legacy rgmii-*id behavior */
	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		priv->rxcidm = 1;
		priv->rgmii_override = 1;
	}

	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		priv->txcidm = 1;
		priv->rgmii_override = 1;
	}
}
static void ravb_set_delay_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 set = 0;

	if (!priv->info->internal_delay)
		return;

	if (priv->rxcidm)
		set |= APSR_RDM;
	if (priv->txcidm)
		set |= APSR_TDM;
	ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
}
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = &priv->pdev->dev;
	int error;

	napi_enable(&priv->napi[RAVB_BE]);
	if (info->nc_queues)
		napi_enable(&priv->napi[RAVB_NC]);

	error = pm_runtime_resume_and_get(dev);
	if (error < 0)
		goto out_napi_off;

	/* Set AVB config mode */
	error = ravb_set_config_mode(ndev);
	if (error)
		goto out_rpm_put;

	ravb_set_delay_mode(ndev);
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error)
		goto out_set_reset;

	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	if (info->gptp || info->ccc_gac)
		ravb_ptp_init(ndev, priv->pdev);

	/* PHY control start */
	error = ravb_phy_start(ndev);
	if (error)
		goto out_ptp_stop;

	netif_tx_start_all_queues(ndev);

	return 0;

out_ptp_stop:
	/* Stop PTP Clock driver */
	if (info->gptp || info->ccc_gac)
		ravb_ptp_stop(ndev);
	ravb_stop_dma(ndev);
out_set_reset:
	ravb_set_opmode(ndev, CCC_OPC_RESET);
out_rpm_put:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out_napi_off:
	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	return error;
}
/* Timeout function for Ethernet AVB */
static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct ravb_private *priv = netdev_priv(ndev);

	netif_err(priv, tx_err, ndev,
		  "transmit timed out, status %08x, resetting...\n",
		  ravb_read(ndev, ISS));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	schedule_work(&priv->work);
}
static void ravb_tx_timeout_work(struct work_struct *work)
{
	struct ravb_private *priv = container_of(work, struct ravb_private,
						 work);
	const struct ravb_hw_info *info = priv->info;
	struct net_device *ndev = priv->ndev;
	int error;

	if (!rtnl_trylock()) {
		usleep_range(1000, 2000);
		schedule_work(&priv->work);
		return;
	}

	netif_tx_stop_all_queues(ndev);

	/* Stop PTP Clock driver */
	if (info->gptp)
		ravb_ptp_stop(ndev);

	/* Wait for DMA stopping */
	if (ravb_stop_dma(ndev)) {
		/* If ravb_stop_dma() fails, the hardware is still operating
		 * for TX and/or RX. So, this should not call the following
		 * functions because ravb_dmac_init() may fail too.
		 * Also, this should not retry ravb_stop_dma() again and again
		 * here because it's possible to wait forever. So, this just
		 * re-enables the TX and RX and skips the following
		 * re-initialization procedure.
		 */
		ravb_rcv_snd_enable(ndev);
		goto out;
	}

	ravb_ring_free(ndev, RAVB_BE);
	if (info->nc_queues)
		ravb_ring_free(ndev, RAVB_NC);

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error) {
		/* If ravb_dmac_init() fails, descriptors are freed. So, this
		 * should return here to avoid re-enabling the TX and RX in
		 * ravb_emac_init().
		 */
		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
			   __func__, error);
		goto out_unlock;
	}
	ravb_emac_init(ndev);

out:
	/* Initialise PTP Clock driver */
	if (info->gptp)
		ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);

out_unlock:
	rtnl_unlock();
}
static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
{
	u16 net_protocol = ntohs(skb->protocol);
	u8 inner_protocol;

	/* GbEth IP can calculate the checksum if:
	 * - there are zero or one VLAN headers with TPID=0x8100
	 * - the network protocol is IPv4 or IPv6
	 * - the transport protocol is TCP, UDP or ICMP
	 * - the packet is not fragmented
	 */

	if (net_protocol == ETH_P_8021Q) {
		struct vlan_hdr vhdr, *vh;

		vh = skb_header_pointer(skb, ETH_HLEN, sizeof(vhdr), &vhdr);
		if (!vh)
			return false;

		net_protocol = ntohs(vh->h_vlan_encapsulated_proto);
	}

	switch (net_protocol) {
	case ETH_P_IP:
		inner_protocol = ip_hdr(skb)->protocol;
		break;
	case ETH_P_IPV6:
		inner_protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return false;
	}

	switch (inner_protocol) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return true;
	default:
		return false;
	}
}
/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned int num_tx_desc = priv->num_tx_desc;
	u16 q = skb_get_queue_mapping(skb);
	struct ravb_tstamp_skb *ts_skb;
	struct ravb_tx_desc *desc;
	unsigned long flags;
	dma_addr_t dma_addr;
	void *buffer;
	u32 entry;
	u32 len;

	if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb))
		skb_checksum_help(skb);

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
	    num_tx_desc) {
		netif_err(priv, tx_queued, ndev,
			  "still transmitting with the full ring!\n");
		netif_stop_subqueue(ndev, q);
		spin_unlock_irqrestore(&priv->lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		goto exit;

	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
	priv->tx_skb[q][entry / num_tx_desc] = skb;

	if (num_tx_desc > 1) {
		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
			 entry / num_tx_desc * DPTR_ALIGN;
		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;

		/* Zero length DMA descriptors are problematic as they seem
		 * to terminate DMA transfers. Avoid them by simply using a
		 * length of DPTR_ALIGN (4) when skb data is aligned to
		 * DPTR_ALIGN.
		 *
		 * As skb is guaranteed to have at least ETH_ZLEN (60)
		 * bytes of data by the call to skb_put_padto() above this
		 * is safe with respect to both the length of the first DMA
		 * descriptor (len) overflowing the available data and the
		 * length of the second DMA descriptor (skb->len - len)
		 * being negative.
		 */
		if (len == 0)
			len = DPTR_ALIGN;

		memcpy(buffer, skb->data, len);
		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			goto drop;

		desc = &priv->tx_ring[q][entry];
		desc->ds_tagl = cpu_to_le16(len);
		desc->dptr = cpu_to_le32(dma_addr);

		buffer = skb->data + len;
		len = skb->len - len;
		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			goto unmap;

		desc++;
	} else {
		desc = &priv->tx_ring[q][entry];
		len = skb->len;
		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			goto drop;
	}
	desc->ds_tagl = cpu_to_le16(len);
	desc->dptr = cpu_to_le32(dma_addr);

	/* TX timestamp required */
	if (info->gptp || info->ccc_gac) {
		if (q == RAVB_NC) {
			ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
			if (!ts_skb) {
				if (num_tx_desc > 1) {
					desc--;
					dma_unmap_single(ndev->dev.parent, dma_addr,
							 len, DMA_TO_DEVICE);
				}
				goto unmap;
			}
			ts_skb->skb = skb_get(skb);
			ts_skb->tag = priv->ts_skb_tag++;
			priv->ts_skb_tag &= 0x3ff;
			list_add_tail(&ts_skb->list, &priv->ts_skb_list);

			/* TAG and timestamp required flag */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
			desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
		}

		skb_tx_timestamp(skb);
	}
	/* Descriptor type must be set after all the above writes */
	dma_wmb();
	if (num_tx_desc > 1) {
		desc->die_dt = DT_FEND;
		desc--;
		desc->die_dt = DT_FSTART;
	} else {
		desc->die_dt = DT_FSINGLE;
	}
	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);

	priv->cur_tx[q] += num_tx_desc;
	if (priv->cur_tx[q] - priv->dirty_tx[q] >
	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
	    !ravb_tx_free(ndev, q, true))
		netif_stop_subqueue(ndev, q);

exit:
	spin_unlock_irqrestore(&priv->lock, flags);
	return NETDEV_TX_OK;

unmap:
	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
	dev_kfree_skb_any(skb);
	priv->tx_skb[q][entry / num_tx_desc] = NULL;
	goto exit;
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	/* If skb needs TX timestamp, it is handled in network control queue */
	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
							       RAVB_BE;
}
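/* Aggregate the per-queue software counters (plus the hardware drop and
 * carrier counters where available) into ndev->stats.
 */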
static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct net_device_stats *nstats, *stats0, *stats1;
	struct device *dev = &priv->pdev->dev;

	nstats = &ndev->stats;

	pm_runtime_get_noresume(dev);

	if (!pm_runtime_active(dev))
		goto out_rpm_put;

	stats0 = &priv->stats[RAVB_BE];

	if (info->tx_counters) {
		nstats->tx_dropped += ravb_read(ndev, TROCR);
		ravb_write(ndev, 0, TROCR);	/* (write clear) */
	}

	if (info->carrier_counters) {
		nstats->collisions += ravb_read(ndev, CXR41);
		ravb_write(ndev, 0, CXR41);	/* (write clear) */
		nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
		ravb_write(ndev, 0, CXR42);	/* (write clear) */
	}

	nstats->rx_packets = stats0->rx_packets;
	nstats->tx_packets = stats0->tx_packets;
	nstats->rx_bytes = stats0->rx_bytes;
	nstats->tx_bytes = stats0->tx_bytes;
	nstats->multicast = stats0->multicast;
	nstats->rx_errors = stats0->rx_errors;
	nstats->rx_crc_errors = stats0->rx_crc_errors;
	nstats->rx_frame_errors = stats0->rx_frame_errors;
	nstats->rx_length_errors = stats0->rx_length_errors;
	nstats->rx_missed_errors = stats0->rx_missed_errors;
	nstats->rx_over_errors = stats0->rx_over_errors;
	if (info->nc_queues) {
		stats1 = &priv->stats[RAVB_NC];

		nstats->rx_packets += stats1->rx_packets;
		nstats->tx_packets += stats1->tx_packets;
		nstats->rx_bytes += stats1->rx_bytes;
		nstats->tx_bytes += stats1->tx_bytes;
		nstats->multicast += stats1->multicast;
		nstats->rx_errors += stats1->rx_errors;
		nstats->rx_crc_errors += stats1->rx_crc_errors;
		nstats->rx_frame_errors += stats1->rx_frame_errors;
		nstats->rx_length_errors += stats1->rx_length_errors;
		nstats->rx_missed_errors += stats1->rx_missed_errors;
		nstats->rx_over_errors += stats1->rx_over_errors;
	}

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return nstats;
}
/* Update promiscuous bit */
static void ravb_set_rx_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ravb_modify(ndev, ECMR, ECMR_PRM,
		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
	spin_unlock_irqrestore(&priv->lock, flags);
}
/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
	struct device *dev = &priv->pdev->dev;
	int error;

	netif_tx_stop_all_queues(ndev);

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* PHY disconnect */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
		if (of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
	}

	/* Stop PTP Clock driver */
	if (info->gptp || info->ccc_gac)
		ravb_ptp_stop(ndev);

	/* Set the config mode to stop the AVB-DMAC's processes */
	if (ravb_stop_dma(ndev) < 0)
		netdev_err(ndev,
			   "device will be stopped after h/w processes are done.\n");

	/* Clear the timestamp list */
	if (info->gptp || info->ccc_gac) {
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
			list_del(&ts_skb->list);
			kfree_skb(ts_skb->skb);
			kfree(ts_skb);
		}
	}

	cancel_work_sync(&priv->work);

	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);

	/* Free all the skb's in the RX queue and the DMA buffers. */
	ravb_ring_free(ndev, RAVB_BE);
	if (info->nc_queues)
		ravb_ring_free(ndev, RAVB_NC);

	/* Update statistics. */
	ravb_get_stats(ndev);

	/* Set reset mode. */
	error = ravb_set_opmode(ndev, CCC_OPC_RESET);
	if (error)
		return error;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}
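/* Report the current hardware timestamping configuration to user space
 * (SIOCGHWTSTAMP).
 */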
static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						HWTSTAMP_TX_OFF;
	switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
	case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RAVB_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
/* Control hardware time stamping */
static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;
	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
	}

	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return ravb_hwtstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return ravb_hwtstamp_set(ndev, req);
	}

	return phy_mii_ioctl(phydev, req, cmd);
}
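/* Record the new MTU; when the interface is running the EMAC is
 * re-initialised so the new maximum frame length takes effect immediately.
 */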
static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ravb_private *priv = netdev_priv(ndev);

	WRITE_ONCE(ndev->mtu, new_mtu);

	if (netif_running(ndev)) {
		synchronize_irq(priv->emac_irq);
		ravb_emac_init(ndev);
	}

	netdev_update_features(ndev);

	return 0;
}
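/* Update the EMAC RX checksum setting (ECMR.RCSC); reception and
 * transmission are briefly disabled around the register change.
 */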
static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable TX and RX */
	ravb_rcv_snd_disable(ndev);

	/* Modify RX Checksum setting */
	ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);

	/* Enable TX and RX */
	ravb_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);
}
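/* GbEth checksum offload control: stop the affected DMAC direction
 * (CSR0.TPE/RPE), wait for it to go idle, update the checksum enable
 * register, then re-enable both directions.
 */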
static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg,
				     u32 val, u32 mask)
{
	u32 csr0 = CSR0_TPE | CSR0_RPE;
	int ret;

	ravb_write(ndev, csr0 & ~mask, CSR0);
	ret = ravb_wait(ndev, CSR0, mask, 0);
	if (!ret)
		ravb_write(ndev, val, reg);

	ravb_write(ndev, csr0, CSR0);

	return ret;
}
static int ravb_set_features_gbeth(struct net_device *ndev,
				   netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	int ret = 0;
	u32 val;

	spin_lock_irqsave(&priv->lock, flags);
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			val = CSR2_CSUM_ENABLE;
		else
			val = 0;

		ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE);
		if (ret)
			goto done;
	}

	if (changed & NETIF_F_HW_CSUM) {
		if (features & NETIF_F_HW_CSUM)
			val = CSR1_CSUM_ENABLE;
		else
			val = 0;

		ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE);
		if (ret)
			goto done;
	}

done:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
static int ravb_set_features_rcar(struct net_device *ndev,
				  netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);

	return 0;
}
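/* The per-SoC set_feature hook touches hardware registers, so it is only
 * called while the device is runtime-active; otherwise the new feature
 * flags are simply recorded for the next time the hardware is set up.
 */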
static int ravb_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = &priv->pdev->dev;
	int ret = 0;

	pm_runtime_get_noresume(dev);

	if (pm_runtime_active(dev))
		ret = info->set_feature(ndev, features);

	pm_runtime_put_noidle(dev);

	if (ret)
		return ret;

	ndev->features = features;

	return 0;
}
static const struct net_device_ops ravb_netdev_ops = {
	.ndo_open		= ravb_open,
	.ndo_stop		= ravb_close,
	.ndo_start_xmit		= ravb_start_xmit,
	.ndo_select_queue	= ravb_select_queue,
	.ndo_get_stats		= ravb_get_stats,
	.ndo_set_rx_mode	= ravb_set_rx_mode,
	.ndo_tx_timeout		= ravb_tx_timeout,
	.ndo_eth_ioctl		= ravb_do_ioctl,
	.ndo_change_mtu		= ravb_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_features	= ravb_set_features,
};
/* MDIO bus init function */
static int ravb_mdio_init(struct ravb_private *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *mdio_node;
	struct phy_device *phydev;
	struct device_node *pn;
	int error;

	/* Bitbang init */
	priv->mdiobb.ops = &bb_ops;

	/* MII controller setting */
	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
	if (!priv->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	priv->mii_bus->name = "ravb_mii";
	priv->mii_bus->parent = dev;
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register MDIO bus */
	mdio_node = of_get_child_by_name(dev->of_node, "mdio");
	if (!mdio_node) {
		/* backwards compatibility for DT lacking mdio subnode */
		mdio_node = of_node_get(dev->of_node);
	}
	error = of_mdiobus_register(priv->mii_bus, mdio_node);
	of_node_put(mdio_node);
	if (error)
		goto out_free_bus;

	pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
	phydev = of_phy_find_device(pn);
	if (phydev) {
		phydev->mac_managed_pm = true;
		put_device(&phydev->mdio.dev);
	}
	of_node_put(pn);

	return 0;

out_free_bus:
	free_mdio_bitbang(priv->mii_bus);
	return error;
}
/* MDIO bus release function */
static int ravb_mdio_release(struct ravb_private *priv)
{
	/* Unregister mdio bus */
	mdiobus_unregister(priv->mii_bus);

	/* Free bitbang info */
	free_mdio_bitbang(priv->mii_bus);

	return 0;
}
static const struct ravb_hw_info ravb_gen2_hw_info = {
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
	.emac_init = ravb_emac_init_rcar,
	.gstrings_stats = ravb_gstrings_stats,
	.gstrings_size = sizeof(ravb_gstrings_stats),
	.net_hw_features = NETIF_F_RXCSUM,
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.tx_max_frame_size = SZ_2K,
	.rx_max_frame_size = SZ_2K,
	.rx_buffer_size = SZ_2K +
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
	.aligned_tx = 1,
	.gptp = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};
static const struct ravb_hw_info ravb_gen3_hw_info = {
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
	.emac_init = ravb_emac_init_rcar,
	.gstrings_stats = ravb_gstrings_stats,
	.gstrings_size = sizeof(ravb_gstrings_stats),
	.net_hw_features = NETIF_F_RXCSUM,
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.tx_max_frame_size = SZ_2K,
	.rx_max_frame_size = SZ_2K,
	.rx_buffer_size = SZ_2K +
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
	.internal_delay = 1,
	.tx_counters = 1,
	.multi_irqs = 1,
	.ccc_gac = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};
static const struct ravb_hw_info ravb_gen4_hw_info = {
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
	.emac_init = ravb_emac_init_rcar_gen4,
	.gstrings_stats = ravb_gstrings_stats,
	.gstrings_size = sizeof(ravb_gstrings_stats),
	.net_hw_features = NETIF_F_RXCSUM,
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.tx_max_frame_size = SZ_2K,
	.rx_max_frame_size = SZ_2K,
	.rx_buffer_size = SZ_2K +
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
	.internal_delay = 1,
	.tx_counters = 1,
	.multi_irqs = 1,
	.err_mgmt_irqs = 1,
	.ccc_gac = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};
static const struct ravb_hw_info ravb_rzv2m_hw_info = {
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
	.emac_init = ravb_emac_init_rcar,
	.gstrings_stats = ravb_gstrings_stats,
	.gstrings_size = sizeof(ravb_gstrings_stats),
	.net_hw_features = NETIF_F_RXCSUM,
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.rx_max_frame_size = SZ_2K,
	.rx_buffer_size = SZ_2K +
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
	.multi_irqs = 1,
	.err_mgmt_irqs = 1,
	.gptp = 1,
	.gptp_ref_clk = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};
static const struct ravb_hw_info gbeth_hw_info = {
	.receive = ravb_rx_gbeth,
	.set_rate = ravb_set_rate_gbeth,
	.set_feature = ravb_set_features_gbeth,
	.dmac_init = ravb_dmac_init_gbeth,
	.emac_init = ravb_emac_init_gbeth,
	.gstrings_stats = ravb_gstrings_stats_gbeth,
	.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
	.net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
	.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
	.vlan_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
	.tccr_mask = TCCR_TSRQ0,
	.tx_max_frame_size = 1522,
	.rx_max_frame_size = SZ_8K,
	.rx_buffer_size = SZ_2K,
	.rx_desc_size = sizeof(struct ravb_rx_desc),
	.aligned_tx = 1,
	.coalesce_irqs = 1,
	.tx_counters = 1,
	.carrier_counters = 1,
	.half_duplex = 1,
};
static const struct of_device_id ravb_match_table[] = {
	{ .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
	{ .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
	{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
	{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
	{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
	{ .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
	{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
	{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
	{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
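/* Request a single interrupt: a named per-channel IRQ on SoCs with multiple
 * interrupt lines, or the one shared IRQ otherwise. When @irq is non-NULL
 * the resolved IRQ number is also returned through it.
 */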
static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
			  const char *ch, int *irq, irq_handler_t handler)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;
	struct device *dev = &pdev->dev;
	const char *devname = dev_name(dev);
	unsigned long flags = 0;
	int error, irq_num;

	if (irq_name) {
		devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
		if (!devname)
			return -ENOMEM;

		irq_num = platform_get_irq_byname(pdev, irq_name);
	} else {
		irq_num = platform_get_irq(pdev, 0);
		flags = IRQF_SHARED;
	}
	if (irq_num < 0)
		return irq_num;

	if (irq)
		*irq = irq_num;

	error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
	if (error)
		netdev_err(ndev, "cannot request IRQ %s\n", devname);

	return error;
}
static int ravb_setup_irqs(struct ravb_private *priv)
{
	const struct ravb_hw_info *info = priv->info;
	struct net_device *ndev = priv->ndev;
	const char *irq_name, *emac_irq_name;
	int error;

	if (!info->multi_irqs)
		return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt);

	if (info->err_mgmt_irqs) {
		irq_name = "dia";
		emac_irq_name = "line3";
	} else {
		irq_name = "ch22";
		emac_irq_name = "ch24";
	}

	error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt);
	if (error)
		return error;

	error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq,
			       ravb_emac_interrupt);
	if (error)
		return error;

	if (info->err_mgmt_irqs) {
		error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt);
		if (error)
			return error;

		error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt);
		if (error)
			return error;
	}

	error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt);
	if (error)
		return error;

	error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt);
	if (error)
		return error;

	error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt);
	if (error)
		return error;

	return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt);
}
static int ravb_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct ravb_hw_info *info;
	struct reset_control *rstc;
	struct ravb_private *priv;
	struct net_device *ndev;
	struct resource *res;
	int error, q;

	if (!np) {
		dev_err(&pdev->dev,
			"this driver is required to be instantiated from device tree\n");
		return -EINVAL;
	}

	rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
				     "failed to get cpg reset\n");

	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
				  NUM_TX_QUEUE, NUM_RX_QUEUE);
	if (!ndev)
		return -ENOMEM;

	info = of_device_get_match_data(&pdev->dev);

	ndev->features = info->net_features;
	ndev->hw_features = info->net_hw_features;
	ndev->vlan_features = info->vlan_features;

	error = reset_control_deassert(rstc);
	if (error)
		goto out_free_netdev;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->info = info;
	priv->rstc = rstc;
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
	if (info->nc_queues) {
		priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
		priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
	}

	error = ravb_setup_irqs(priv);
	if (error)
		goto out_reset_assert;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		error = PTR_ERR(priv->clk);
		goto out_reset_assert;
	}

	if (info->gptp_ref_clk) {
		priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
		if (IS_ERR(priv->gptp_clk)) {
			error = PTR_ERR(priv->gptp_clk);
			goto out_reset_assert;
		}
	}

	priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
	if (IS_ERR(priv->refclk)) {
		error = PTR_ERR(priv->refclk);
		goto out_reset_assert;
	}
	clk_prepare(priv->refclk);

	platform_set_drvdata(pdev, ndev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	error = pm_runtime_resume_and_get(&pdev->dev);
	if (error < 0)
		goto out_rpm_disable;

	priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(priv->addr)) {
		error = PTR_ERR(priv->addr);
		goto out_rpm_put;
	}

	/* The Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;

	spin_lock_init(&priv->lock);
	INIT_WORK(&priv->work, ravb_tx_timeout_work);

	error = of_get_phy_mode(np, &priv->phy_interface);
	if (error && error != -ENODEV)
		goto out_rpm_put;

	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
	priv->avb_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	ndev->max_mtu = info->tx_max_frame_size -
		(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	ndev->min_mtu = ETH_MIN_MTU;

	/* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer
	 * Use two descriptor to handle such situation. First descriptor to
	 * handle aligned data buffer and second descriptor to handle the
	 * overflow data because of alignment.
	 */
	priv->num_tx_desc = info->aligned_tx ? 2 : 1;

	/* Set function */
	ndev->netdev_ops = &ravb_netdev_ops;
	ndev->ethtool_ops = &ravb_ethtool_ops;

	error = ravb_compute_gti(ndev);
	if (error)
		goto out_rpm_put;

	ravb_parse_delay_mode(np, ndev);

	/* Allocate descriptor base address table */
	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
					    &priv->desc_bat_dma, GFP_KERNEL);
	if (!priv->desc_bat) {
		dev_err(&pdev->dev,
			"Cannot allocate desc base address table (size %d bytes)\n",
			priv->desc_bat_size);
		error = -ENOMEM;
		goto out_rpm_put;
	}
	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
		priv->desc_bat[q].die_dt = DT_EOS;

	/* Initialise HW timestamp list */
	INIT_LIST_HEAD(&priv->ts_skb_list);

	/* Debug message level */
	priv->msg_enable = RAVB_DEF_MSG_ENABLE;

	/* Set config mode as this is needed for PHY initialization. */
	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
	if (error)
		goto out_rpm_put;

	/* Read and set MAC address */
	ravb_read_mac_address(np, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one\n");
		eth_hw_addr_random(ndev);
	}

	/* MDIO bus init */
	error = ravb_mdio_init(priv);
	if (error) {
		dev_err(&pdev->dev, "failed to initialize MDIO\n");
		goto out_reset_mode;
	}

	/* Undo previous switch to config opmode. */
	error = ravb_set_opmode(ndev, CCC_OPC_RESET);
	if (error)
		goto out_mdio_release;

	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
	if (info->nc_queues)
		netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);

	if (info->coalesce_irqs) {
		netdev_sw_irq_coalesce_default_on(ndev);
		if (num_present_cpus() == 1)
			dev_set_threaded(ndev, true);
	}

	/* Network device register */
	error = register_netdev(ndev);
	if (error)
		goto out_napi_del;

	device_set_wakeup_capable(&pdev->dev, 1);

	/* Print device information */
	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

out_napi_del:
	if (info->nc_queues)
		netif_napi_del(&priv->napi[RAVB_NC]);

	netif_napi_del(&priv->napi[RAVB_BE]);
out_mdio_release:
	ravb_mdio_release(priv);
out_reset_mode:
	ravb_set_opmode(ndev, CCC_OPC_RESET);
	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);
out_rpm_put:
	pm_runtime_put(&pdev->dev);
out_rpm_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	clk_unprepare(priv->refclk);
out_reset_assert:
	reset_control_assert(rstc);
out_free_netdev:
	free_netdev(ndev);
	return error;
}
static void ravb_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = &priv->pdev->dev;
	int error;

	error = pm_runtime_resume_and_get(dev);
	if (error < 0)
		return;

	unregister_netdev(ndev);
	if (info->nc_queues)
		netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);

	ravb_mdio_release(priv);

	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);

	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(dev);
	clk_unprepare(priv->refclk);
	reset_control_assert(priv->rstc);

	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
}
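/* Arm Wake-on-LAN: mask all interrupt sources except magic-packet
 * detection, enable MagicPacket detection in the EMAC and make the EMAC
 * interrupt a wakeup source.
 */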
static int ravb_wol_setup(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Only allow ECI interrupts */
	synchronize_irq(priv->emac_irq);
	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);

	/* Enable MagicPacket */
	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);

	if (priv->info->ccc_gac)
		ravb_ptp_stop(ndev);

	return enable_irq_wake(priv->emac_irq);
}
static int ravb_wol_restore(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Set reset mode to rearm the WoL logic. */
	error = ravb_set_opmode(ndev, CCC_OPC_RESET);
	if (error)
		return error;

	/* Set AVB config mode. */
	error = ravb_set_config_mode(ndev);
	if (error)
		return error;

	if (priv->info->ccc_gac)
		ravb_ptp_init(ndev, priv->pdev);

	if (info->nc_queues)
		napi_enable(&priv->napi[RAVB_NC]);
	napi_enable(&priv->napi[RAVB_BE]);

	/* Disable MagicPacket */
	ravb_modify(ndev, ECMR, ECMR_MPDE, 0);

	ravb_close(ndev);

	return disable_irq_wake(priv->emac_irq);
}
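/* System sleep: with Wake-on-LAN enabled only the WoL logic is armed;
 * otherwise the interface is closed and the device is forced into runtime
 * suspend before the reset line is asserted.
 */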
static int ravb_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		goto reset_assert;

	netif_device_detach(ndev);

	if (priv->wol_enabled)
		return ravb_wol_setup(ndev);

	ret = ravb_close(ndev);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(&priv->pdev->dev);
	if (ret)
		return ret;

reset_assert:
	return reset_control_assert(priv->rstc);
}
static int ravb_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret;

	ret = reset_control_deassert(priv->rstc);
	if (ret)
		return ret;

	if (!netif_running(ndev))
		return 0;

	/* If WoL is enabled restore the interface. */
	if (priv->wol_enabled) {
		ret = ravb_wol_restore(ndev);
		if (ret)
			return ret;
	} else {
		ret = pm_runtime_force_resume(dev);
		if (ret)
			return ret;
	}

	/* Reopening the interface will restore the device to the working state. */
	ret = ravb_open(ndev);
	if (ret < 0)
		goto out_rpm_put;

	ravb_set_rx_mode(ndev);
	netif_device_attach(ndev);

	return 0;

out_rpm_put:
	if (!priv->wol_enabled) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}

	return ret;
}
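/* Runtime PM only gates the optional external reference clock here; the
 * functional clock is expected to be handled by the SoC's PM domain.
 */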
static int ravb_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);

	clk_disable(priv->refclk);

	return 0;
}

static int ravb_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);

	return clk_enable(priv->refclk);
}
static const struct dev_pm_ops ravb_dev_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
	RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL)
};

static struct platform_driver ravb_driver = {
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.driver = {
		.name		= "ravb",
		.pm		= pm_ptr(&ravb_dev_pm_ops),
		.of_match_table = ravb_match_table,
	},
};

module_platform_driver(ravb_driver);
MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");