// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2019 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include <asm/div64.h>

#include "ravb.h"
#define RAVB_DEF_MSG_ENABLE \
                (NETIF_MSG_LINK   | \
                 NETIF_MSG_TIMER  | \
                 NETIF_MSG_RX_ERR | \
                 NETIF_MSG_TX_ERR)

static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
        "ch0", /* RAVB_BE */
        "ch1", /* RAVB_NC */
};

static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
        "ch18", /* RAVB_BE */
        "ch19", /* RAVB_NC */
};
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
                 u32 set)
{
        ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
        int i;

        for (i = 0; i < 10000; i++) {
                if ((ravb_read(ndev, reg) & mask) == value)
                        return 0;
                udelay(10);
        }
        return -ETIMEDOUT;
}
static int ravb_config(struct net_device *ndev)
{
        int error;

        /* Set config mode */
        ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
        /* Check if the operating mode is changed to the config mode */
        error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
        if (error)
                netdev_err(ndev, "failed to switch device to config mode\n");

        return error;
}
static void ravb_set_rate(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);

        switch (priv->speed) {
        case 100:       /* 100BASE */
                ravb_write(ndev, GECMR_SPEED_100, GECMR);
                break;
        case 1000:      /* 1000BASE */
                ravb_write(ndev, GECMR_SPEED_1000, GECMR);
                break;
        }
}
static void ravb_set_buffer_align(struct sk_buff *skb)
{
        u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

        if (reserve)
                skb_reserve(skb, RAVB_ALIGN - reserve);
}
/* Get MAC address from the MAC address registers
 *
 * Ethernet AVB device doesn't have ROM for MAC address.
 * This function gets the MAC address that was used by a bootloader.
 */
static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
{
        if (!IS_ERR(mac)) {
                ether_addr_copy(ndev->dev_addr, mac);
        } else {
                u32 mahr = ravb_read(ndev, MAHR);
                u32 malr = ravb_read(ndev, MALR);

                ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
                ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
                ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
                ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
                ndev->dev_addr[4] = (malr >>  8) & 0xFF;
                ndev->dev_addr[5] = (malr >>  0) & 0xFF;
        }
}
static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
        struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                                 mdiobb);

        ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}
/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
        ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
        ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
        ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
        struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                                 mdiobb);

        return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static struct mdiobb_ops bb_ops = {
        .owner = THIS_MODULE,
        .set_mdc = ravb_set_mdc,
        .set_mdio_dir = ravb_set_mdio_dir,
        .set_mdio_data = ravb_set_mdio_data,
        .get_mdio_data = ravb_get_mdio_data,
};
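
/* Note: the MDIO bus is bit-banged in software.  The four callbacks above
 * toggle or sample individual bits of the PIR register (clock, direction,
 * data out, data in), and the generic mdio-bitbang helper turns those
 * primitives into complete MDIO transactions.
 */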
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &priv->stats[q];
        int num_tx_desc = priv->num_tx_desc;
        struct ravb_tx_desc *desc;
        int free_num = 0;
        int entry;
        u32 size;

        for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
                bool txed;

                entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
                                             num_tx_desc);
                desc = &priv->tx_ring[q][entry];
                txed = desc->die_dt == DT_FEMPTY;
                if (free_txed_only && !txed)
                        break;
                /* Descriptor type must be checked before all other reads */
                dma_rmb();
                size = le16_to_cpu(desc->ds_tagl) & TX_DS;
                /* Free the original skb. */
                if (priv->tx_skb[q][entry / num_tx_desc]) {
                        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
                                         size, DMA_TO_DEVICE);
                        /* Last packet descriptor? */
                        if (entry % num_tx_desc == num_tx_desc - 1) {
                                entry /= num_tx_desc;
                                dev_kfree_skb_any(priv->tx_skb[q][entry]);
                                priv->tx_skb[q][entry] = NULL;
                                if (txed)
                                        stats->tx_packets++;
                        }
                        free_num++;
                }
                if (txed)
                        stats->tx_bytes += size;
                desc->die_dt = DT_EEMPTY;
        }
        return free_num;
}
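
/* Note: each transmitted packet occupies num_tx_desc consecutive
 * descriptors (one on R-Car Gen2, two on Gen3, where the first descriptor
 * carries a small aligned copy of the head of the frame), so tx_skb[] is
 * indexed by entry / num_tx_desc and a packet only counts as complete once
 * its last descriptor has been processed.
 */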
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int num_tx_desc = priv->num_tx_desc;
        int ring_size;
        int i;

        if (priv->rx_ring[q]) {
                for (i = 0; i < priv->num_rx_ring[q]; i++) {
                        struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];

                        if (!dma_mapping_error(ndev->dev.parent,
                                               le32_to_cpu(desc->dptr)))
                                dma_unmap_single(ndev->dev.parent,
                                                 le32_to_cpu(desc->dptr),
                                                 RX_BUF_SZ,
                                                 DMA_FROM_DEVICE);
                }
                ring_size = sizeof(struct ravb_ex_rx_desc) *
                            (priv->num_rx_ring[q] + 1);
                dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
                                  priv->rx_desc_dma[q]);
                priv->rx_ring[q] = NULL;
        }

        if (priv->tx_ring[q]) {
                ravb_tx_free(ndev, q, false);

                ring_size = sizeof(struct ravb_tx_desc) *
                            (priv->num_tx_ring[q] * num_tx_desc + 1);
                dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
                                  priv->tx_desc_dma[q]);
                priv->tx_ring[q] = NULL;
        }

        /* Free RX skb ringbuffer */
        if (priv->rx_skb[q]) {
                for (i = 0; i < priv->num_rx_ring[q]; i++)
                        dev_kfree_skb(priv->rx_skb[q][i]);
        }
        kfree(priv->rx_skb[q]);
        priv->rx_skb[q] = NULL;

        /* Free aligned TX buffers */
        kfree(priv->tx_align[q]);
        priv->tx_align[q] = NULL;

        /* Free TX skb ringbuffer.
         * SKBs are freed by ravb_tx_free() call above.
         */
        kfree(priv->tx_skb[q]);
        priv->tx_skb[q] = NULL;
}
/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int num_tx_desc = priv->num_tx_desc;
        struct ravb_ex_rx_desc *rx_desc;
        struct ravb_tx_desc *tx_desc;
        struct ravb_desc *desc;
        int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
        int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
                           num_tx_desc;
        dma_addr_t dma_addr;
        int i;

        priv->cur_rx[q] = 0;
        priv->cur_tx[q] = 0;
        priv->dirty_rx[q] = 0;
        priv->dirty_tx[q] = 0;

        memset(priv->rx_ring[q], 0, rx_ring_size);
        /* Build RX ring buffer */
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                /* RX descriptor */
                rx_desc = &priv->rx_ring[q][i];
                rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
                dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
                                          RX_BUF_SZ,
                                          DMA_FROM_DEVICE);
                /* We just set the data size to 0 for a failed mapping which
                 * should prevent DMA from happening...
                 */
                if (dma_mapping_error(ndev->dev.parent, dma_addr))
                        rx_desc->ds_cc = cpu_to_le16(0);
                rx_desc->dptr = cpu_to_le32(dma_addr);
                rx_desc->die_dt = DT_FEMPTY;
        }
        rx_desc = &priv->rx_ring[q][i];
        rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
        rx_desc->die_dt = DT_LINKFIX; /* type */

        memset(priv->tx_ring[q], 0, tx_ring_size);
        /* Build TX ring buffer */
        for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
             i++, tx_desc++) {
                tx_desc->die_dt = DT_EEMPTY;
                if (num_tx_desc > 1) {
                        tx_desc++;
                        tx_desc->die_dt = DT_EEMPTY;
                }
        }
        tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
        tx_desc->die_dt = DT_LINKFIX; /* type */

        /* RX descriptor base address for best effort */
        desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
        desc->die_dt = DT_LINKFIX; /* type */
        desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

        /* TX descriptor base address for best effort */
        desc = &priv->desc_bat[q];
        desc->die_dt = DT_LINKFIX; /* type */
        desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
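
/* Note: each ring is closed with an extra DT_LINKFIX descriptor whose
 * pointer holds the DMA address of the ring base, so the AVB-DMAC wraps
 * around to the first descriptor instead of running off the end; the same
 * link-type descriptors in desc_bat tell the hardware where each queue's
 * ring starts.
 */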
/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int num_tx_desc = priv->num_tx_desc;
        struct sk_buff *skb;
        int ring_size;
        int i;

        /* Allocate RX and TX skb rings */
        priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
                                  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
        priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
                                  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
        if (!priv->rx_skb[q] || !priv->tx_skb[q])
                goto error;

        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
                if (!skb)
                        goto error;
                ravb_set_buffer_align(skb);
                priv->rx_skb[q][i] = skb;
        }

        if (num_tx_desc > 1) {
                /* Allocate rings for the aligned buffers */
                priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
                                            DPTR_ALIGN - 1, GFP_KERNEL);
                if (!priv->tx_align[q])
                        goto error;
        }

        /* Allocate all RX descriptors. */
        ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
        priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
                                              &priv->rx_desc_dma[q],
                                              GFP_KERNEL);
        if (!priv->rx_ring[q])
                goto error;

        priv->dirty_rx[q] = 0;

        /* Allocate all TX descriptors. */
        ring_size = sizeof(struct ravb_tx_desc) *
                    (priv->num_tx_ring[q] * num_tx_desc + 1);
        priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
                                              &priv->tx_desc_dma[q],
                                              GFP_KERNEL);
        if (!priv->tx_ring[q])
                goto error;

        return 0;

error:
        ravb_ring_free(ndev, q);

        return -ENOMEM;
}
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
        /* Receive frame limit set register */
        ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);

        /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
        ravb_write(ndev, ECMR_ZPF | ECMR_DM |
                   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
                   ECMR_TE | ECMR_RE, ECMR);

        ravb_set_rate(ndev);

        /* Set MAC address */
        ravb_write(ndev,
                   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
        ravb_write(ndev,
                   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

        /* E-MAC status register clear */
        ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

        /* E-MAC interrupt enable register */
        ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}
/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int error;

        /* Set CONFIG mode */
        error = ravb_config(ndev);
        if (error)
                return error;

        error = ravb_ring_init(ndev, RAVB_BE);
        if (error)
                return error;
        error = ravb_ring_init(ndev, RAVB_NC);
        if (error) {
                ravb_ring_free(ndev, RAVB_BE);
                return error;
        }

        /* Descriptor format */
        ravb_ring_format(ndev, RAVB_BE);
        ravb_ring_format(ndev, RAVB_NC);

        /* Set AVB RX */
        ravb_write(ndev,
                   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);

        /* Set FIFO size */
        ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);

        /* Timestamp enable */
        ravb_write(ndev, TCCR_TFEN, TCCR);

        /* Interrupt init: */
        if (priv->chip_id == RCAR_GEN3) {
                /* Clear DIL.DPLx */
                ravb_write(ndev, 0, DIL);
                /* Set queue specific interrupt */
                ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
        }
        /* Frame receive */
        ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
        /* Disable FIFO full warning */
        ravb_write(ndev, 0, RIC1);
        /* Receive FIFO full error, descriptor empty */
        ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
        /* Frame transmitted, timestamp FIFO updated */
        ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

        /* Setting the control will start the AVB-DMAC process. */
        ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);

        return 0;
}
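
/* Note: the AVB-DMAC is a mode-driven state machine; its descriptor and
 * interrupt setup registers are written while in CONFIG mode, which is why
 * ravb_dmac_init() first calls ravb_config() and only flips CCC.OPC to
 * OPERATION as its very last step.
 */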
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct ravb_tstamp_skb *ts_skb, *ts_skb2;
        struct skb_shared_hwtstamps shhwtstamps;
        struct sk_buff *skb;
        struct timespec64 ts;
        u16 tag, tfa_tag;
        int count;
        u32 tfa2;

        count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
        while (count--) {
                tfa2 = ravb_read(ndev, TFA2);
                tfa_tag = (tfa2 & TFA2_TST) >> 16;
                ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
                ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
                            ravb_read(ndev, TFA1);
                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
                list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
                                         list) {
                        skb = ts_skb->skb;
                        tag = ts_skb->tag;
                        list_del(&ts_skb->list);
                        kfree(ts_skb);
                        if (tag == tfa_tag) {
                                skb_tstamp_tx(skb, &shhwtstamps);
                                dev_consume_skb_any(skb);
                                break;
                        } else {
                                dev_kfree_skb_any(skb);
                        }
                }
                ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
        }
}
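
/* Note: the hardware queues TX timestamps in a small FIFO (TFA0..TFA2)
 * together with a tag that was assigned in ravb_start_xmit().  The loop
 * above matches each FIFO entry against the pending skb list and delivers
 * the timestamp to the skb whose tag agrees, discarding stale entries.
 */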
static void ravb_rx_csum(struct sk_buff *skb)
{
        u8 *hw_csum;

        /* The hardware checksum is contained in sizeof(__sum16) (2) bytes
         * appended to packet data
         */
        if (unlikely(skb->len < sizeof(__sum16)))
                return;
        hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
        skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
        skb->ip_summed = CHECKSUM_COMPLETE;
        skb_trim(skb, skb->len - sizeof(__sum16));
}
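
/* Note: the E-MAC appends the 16-bit one's-complement sum of the received
 * payload to the frame, so the driver can report CHECKSUM_COMPLETE from
 * that trailer and then trims the two extra bytes off the skb.
 */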
/* Packet receive function for Ethernet AVB */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
        int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
                       priv->cur_rx[q];
        struct net_device_stats *stats = &priv->stats[q];
        struct ravb_ex_rx_desc *desc;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        struct timespec64 ts;
        u8 desc_status;
        u16 pkt_len;
        int limit;

        boguscnt = min(boguscnt, *quota);
        limit = boguscnt;
        desc = &priv->rx_ring[q][entry];
        while (desc->die_dt != DT_FEMPTY) {
                /* Descriptor type must be checked before all other reads */
                dma_rmb();
                desc_status = desc->msc;
                pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

                if (--boguscnt < 0)
                        break;

                /* We use 0-byte descriptors to mark the DMA mapping errors */
                if (!pkt_len)
                        continue;

                if (desc_status & MSC_MC)
                        stats->multicast++;

                if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
                                   MSC_CEEF)) {
                        stats->rx_errors++;
                        if (desc_status & MSC_CRC)
                                stats->rx_crc_errors++;
                        if (desc_status & MSC_RFE)
                                stats->rx_frame_errors++;
                        if (desc_status & (MSC_RTLF | MSC_RTSF))
                                stats->rx_length_errors++;
                        if (desc_status & MSC_CEEF)
                                stats->rx_missed_errors++;
                } else {
                        u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;

                        skb = priv->rx_skb[q][entry];
                        priv->rx_skb[q][entry] = NULL;
                        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
                                         RX_BUF_SZ,
                                         DMA_FROM_DEVICE);
                        get_ts &= (q == RAVB_NC) ?
                                        RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
                                        ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
                        if (get_ts) {
                                struct skb_shared_hwtstamps *shhwtstamps;

                                shhwtstamps = skb_hwtstamps(skb);
                                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                                ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
                                             32) | le32_to_cpu(desc->ts_sl);
                                ts.tv_nsec = le32_to_cpu(desc->ts_n);
                                shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
                        }

                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        if (ndev->features & NETIF_F_RXCSUM)
                                ravb_rx_csum(skb);
                        napi_gro_receive(&priv->napi[q], skb);
                        stats->rx_packets++;
                        stats->rx_bytes += pkt_len;
                }

                entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
                desc = &priv->rx_ring[q][entry];
        }

        /* Refill the RX ring buffers. */
        for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
                desc = &priv->rx_ring[q][entry];
                desc->ds_cc = cpu_to_le16(RX_BUF_SZ);

                if (!priv->rx_skb[q][entry]) {
                        skb = netdev_alloc_skb(ndev,
                                               RX_BUF_SZ + RAVB_ALIGN - 1);
                        if (!skb)
                                break;  /* Better luck next round. */
                        ravb_set_buffer_align(skb);
                        dma_addr = dma_map_single(ndev->dev.parent, skb->data,
                                                  le16_to_cpu(desc->ds_cc),
                                                  DMA_FROM_DEVICE);
                        skb_checksum_none_assert(skb);
                        /* We just set the data size to 0 for a failed mapping
                         * which should prevent DMA from happening...
                         */
                        if (dma_mapping_error(ndev->dev.parent, dma_addr))
                                desc->ds_cc = cpu_to_le16(0);
                        desc->dptr = cpu_to_le32(dma_addr);
                        priv->rx_skb[q][entry] = skb;
                }
                /* Descriptor type must be set after all the above writes */
                dma_wmb();
                desc->die_dt = DT_FEMPTY;
        }

        *quota -= limit - (++boguscnt);

        return boguscnt <= 0;
}
static void ravb_rcv_snd_disable(struct net_device *ndev)
{
        /* Disable TX and RX */
        ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
        /* Enable TX and RX */
        ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}
/* Wait until the DMA process has finished */
static int ravb_stop_dma(struct net_device *ndev)
{
        int error;

        /* Wait for stopping the hardware TX process */
        error = ravb_wait(ndev, TCCR,
                          TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
        if (error)
                return error;

        error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
                          0);
        if (error)
                return error;

        /* Stop the E-MAC's RX/TX processes. */
        ravb_rcv_snd_disable(ndev);

        /* Wait for stopping the RX DMA process */
        error = ravb_wait(ndev, CSR, CSR_RPO, 0);
        if (error)
                return error;

        /* Stop AVB-DMAC process */
        return ravb_config(ndev);
}
/* E-MAC interrupt handler */
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        u32 ecsr, psr;

        ecsr = ravb_read(ndev, ECSR);
        ravb_write(ndev, ecsr, ECSR);   /* clear interrupt */

        if (ecsr & ECSR_MPD)
                pm_wakeup_event(&priv->pdev->dev, 0);
        if (ecsr & ECSR_ICD)
                ndev->stats.tx_carrier_errors++;
        if (ecsr & ECSR_LCHNG) {
                /* Link changed */
                if (priv->no_avb_link)
                        return;
                psr = ravb_read(ndev, PSR);
                if (priv->avb_link_active_low)
                        psr ^= PSR_LMON;
                if (!(psr & PSR_LMON)) {
                        /* Disable RX and TX */
                        ravb_rcv_snd_disable(ndev);
                } else {
                        /* Enable RX and TX */
                        ravb_rcv_snd_enable(ndev);
                }
        }
}
static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct ravb_private *priv = netdev_priv(ndev);

        spin_lock(&priv->lock);
        ravb_emac_interrupt_unlocked(ndev);
        spin_unlock(&priv->lock);
        return IRQ_HANDLED;
}
/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        u32 eis, ris2;

        eis = ravb_read(ndev, EIS);
        ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
        if (eis & EIS_QFS) {
                ris2 = ravb_read(ndev, RIS2);
                ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
                           RIS2);

                /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF0)
                        priv->stats[RAVB_BE].rx_over_errors++;

                /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF1)
                        priv->stats[RAVB_NC].rx_over_errors++;

                /* Receive FIFO Overflow int */
                if (ris2 & RIS2_RFFF)
                        priv->rx_fifo_errors++;
        }
}
static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        u32 ris0 = ravb_read(ndev, RIS0);
        u32 ric0 = ravb_read(ndev, RIC0);
        u32 tis = ravb_read(ndev, TIS);
        u32 tic = ravb_read(ndev, TIC);

        if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
                if (napi_schedule_prep(&priv->napi[q])) {
                        /* Mask RX and TX interrupts */
                        if (priv->chip_id == RCAR_GEN2) {
                                ravb_write(ndev, ric0 & ~BIT(q), RIC0);
                                ravb_write(ndev, tic & ~BIT(q), TIC);
                        } else {
                                ravb_write(ndev, BIT(q), RID0);
                                ravb_write(ndev, BIT(q), TID);
                        }
                        __napi_schedule(&priv->napi[q]);
                } else {
                        netdev_warn(ndev,
                                    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
                                    ris0, ric0);
                        netdev_warn(ndev,
                                    "                    tx status 0x%08x, tx mask 0x%08x.\n",
                                    tis, tic);
                }
                return true;
        }
        return false;
}
static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
        u32 tis = ravb_read(ndev, TIS);

        if (tis & TIS_TFUF) {
                ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
                ravb_get_tx_tstamp(ndev);
                return true;
        }
        return false;
}
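
/* Note on ravb_queue_interrupt() above: on R-Car Gen2 the per-queue RX/TX
 * interrupts are masked by clearing bits in the shared RIC0/TIC enable
 * masks, while Gen3 provides dedicated interrupt disable registers
 * (RID0/TID) so a single write masks one queue without a read-modify-write.
 */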
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct ravb_private *priv = netdev_priv(ndev);
        irqreturn_t result = IRQ_NONE;
        u32 iss;

        spin_lock(&priv->lock);
        /* Get interrupt status */
        iss = ravb_read(ndev, ISS);

        /* Received and transmitted interrupts */
        if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
                int q;

                /* Timestamp updated */
                if (ravb_timestamp_interrupt(ndev))
                        result = IRQ_HANDLED;

                /* Network control and best effort queue RX/TX */
                for (q = RAVB_NC; q >= RAVB_BE; q--) {
                        if (ravb_queue_interrupt(ndev, q))
                                result = IRQ_HANDLED;
                }
        }

        /* E-MAC status summary */
        if (iss & ISS_MS) {
                ravb_emac_interrupt_unlocked(ndev);
                result = IRQ_HANDLED;
        }

        /* Error status summary */
        if (iss & ISS_ES) {
                ravb_error_interrupt(ndev);
                result = IRQ_HANDLED;
        }

        /* gPTP interrupt status summary */
        if (iss & ISS_CGIS) {
                ravb_ptp_interrupt(ndev);
                result = IRQ_HANDLED;
        }

        spin_unlock(&priv->lock);
        return result;
}
/* Timestamp/Error/gPTP interrupt handler */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct ravb_private *priv = netdev_priv(ndev);
        irqreturn_t result = IRQ_NONE;
        u32 iss;

        spin_lock(&priv->lock);
        /* Get interrupt status */
        iss = ravb_read(ndev, ISS);

        /* Timestamp updated */
        if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
                result = IRQ_HANDLED;

        /* Error status summary */
        if (iss & ISS_ES) {
                ravb_error_interrupt(ndev);
                result = IRQ_HANDLED;
        }

        /* gPTP interrupt status summary */
        if (iss & ISS_CGIS) {
                ravb_ptp_interrupt(ndev);
                result = IRQ_HANDLED;
        }

        spin_unlock(&priv->lock);
        return result;
}
static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
        struct net_device *ndev = dev_id;
        struct ravb_private *priv = netdev_priv(ndev);
        irqreturn_t result = IRQ_NONE;

        spin_lock(&priv->lock);

        /* Network control/Best effort queue RX/TX */
        if (ravb_queue_interrupt(ndev, q))
                result = IRQ_HANDLED;

        spin_unlock(&priv->lock);
        return result;
}

static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
        return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}

static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
        return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}
static int ravb_poll(struct napi_struct *napi, int budget)
{
        struct net_device *ndev = napi->dev;
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;
        int q = napi - priv->napi;
        int mask = BIT(q);
        int quota = budget;
        u32 ris0, tis;

        for (;;) {
                tis = ravb_read(ndev, TIS);
                ris0 = ravb_read(ndev, RIS0);
                if (!((ris0 & mask) || (tis & mask)))
                        break;

                /* Processing RX Descriptor Ring */
                if (ris0 & mask) {
                        /* Clear RX interrupt */
                        ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
                        if (ravb_rx(ndev, &quota, q))
                                goto out;
                }
                /* Processing TX Descriptor Ring */
                if (tis & mask) {
                        spin_lock_irqsave(&priv->lock, flags);
                        /* Clear TX interrupt */
                        ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
                        ravb_tx_free(ndev, q, true);
                        netif_wake_subqueue(ndev, q);
                        spin_unlock_irqrestore(&priv->lock, flags);
                }
        }

        napi_complete(napi);

        /* Re-enable RX/TX interrupts */
        spin_lock_irqsave(&priv->lock, flags);
        if (priv->chip_id == RCAR_GEN2) {
                ravb_modify(ndev, RIC0, mask, mask);
                ravb_modify(ndev, TIC, mask, mask);
        } else {
                ravb_write(ndev, mask, RIE0);
                ravb_write(ndev, mask, TIE);
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        /* Receive error message handling */
        priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
        priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
        if (priv->rx_over_errors != ndev->stats.rx_over_errors)
                ndev->stats.rx_over_errors = priv->rx_over_errors;
        if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
                ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
        return budget - quota;
}
/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;
        bool new_state = false;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        /* Disable TX and RX right over here, if E-MAC change is ignored */
        if (priv->no_avb_link)
                ravb_rcv_snd_disable(ndev);

        if (phydev->link) {
                if (phydev->speed != priv->speed) {
                        new_state = true;
                        priv->speed = phydev->speed;
                        ravb_set_rate(ndev);
                }
                if (!priv->link) {
                        ravb_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = true;
                        priv->link = phydev->link;
                }
        } else if (priv->link) {
                new_state = true;
                priv->link = 0;
                priv->speed = 0;
        }

        /* Enable TX and RX right over here, if E-MAC change is ignored */
        if (priv->no_avb_link && phydev->link)
                ravb_rcv_snd_enable(ndev);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
}
static const struct soc_device_attribute r8a7795es10[] = {
        { .soc_id = "r8a7795", .revision = "ES1.0", },
        { /* sentinel */ }
};
/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
        struct device_node *np = ndev->dev.parent->of_node;
        struct ravb_private *priv = netdev_priv(ndev);
        struct phy_device *phydev;
        struct device_node *pn;
        int err;

        priv->link = 0;
        priv->speed = 0;

        /* Try connecting to PHY */
        pn = of_parse_phandle(np, "phy-handle", 0);
        if (!pn) {
                /* In the case of a fixed PHY, the DT node associated
                 * to the PHY is the Ethernet MAC DT node.
                 */
                if (of_phy_is_fixed_link(np)) {
                        err = of_phy_register_fixed_link(np);
                        if (err)
                                return err;
                }
                pn = of_node_get(np);
        }
        phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
                                priv->phy_interface);
        of_node_put(pn);
        if (!phydev) {
                netdev_err(ndev, "failed to connect PHY\n");
                err = -ENOENT;
                goto err_deregister_fixed_link;
        }

        /* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0
         * at this time.
         */
        if (soc_device_match(r8a7795es10)) {
                err = phy_set_max_speed(phydev, SPEED_100);
                if (err) {
                        netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
                        goto err_phy_disconnect;
                }

                netdev_info(ndev, "limited PHY to 100Mbit/s\n");
        }

        /* 10BASE, Pause and Asym Pause are not supported */
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

        /* Half Duplex is not supported */
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);

        phy_attached_info(phydev);

        return 0;

err_phy_disconnect:
        phy_disconnect(phydev);
err_deregister_fixed_link:
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);

        return err;
}
/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
        int error;

        error = ravb_phy_init(ndev);
        if (error)
                return error;

        phy_start(ndev->phydev);

        return 0;
}
static u32 ravb_get_msglevel(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);

        return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
        struct ravb_private *priv = netdev_priv(ndev);

        priv->msg_enable = value;
}
static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
        "rx_queue_0_current",
        "tx_queue_0_current",
        "rx_queue_0_dirty",
        "tx_queue_0_dirty",
        "rx_queue_0_packets",
        "tx_queue_0_packets",
        "rx_queue_0_bytes",
        "tx_queue_0_bytes",
        "rx_queue_0_mcast_packets",
        "rx_queue_0_errors",
        "rx_queue_0_crc_errors",
        "rx_queue_0_frame_errors",
        "rx_queue_0_length_errors",
        "rx_queue_0_missed_errors",
        "rx_queue_0_over_errors",

        "rx_queue_1_current",
        "tx_queue_1_current",
        "rx_queue_1_dirty",
        "tx_queue_1_dirty",
        "rx_queue_1_packets",
        "tx_queue_1_packets",
        "rx_queue_1_bytes",
        "tx_queue_1_bytes",
        "rx_queue_1_mcast_packets",
        "rx_queue_1_errors",
        "rx_queue_1_crc_errors",
        "rx_queue_1_frame_errors",
        "rx_queue_1_length_errors",
        "rx_queue_1_missed_errors",
        "rx_queue_1_over_errors",
};

#define RAVB_STATS_LEN  ARRAY_SIZE(ravb_gstrings_stats)
static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return RAVB_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}
static void ravb_get_ethtool_stats(struct net_device *ndev,
                                   struct ethtool_stats *estats, u64 *data)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int i = 0;
        int q;

        /* Device-specific stats */
        for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
                struct net_device_stats *stats = &priv->stats[q];

                data[i++] = priv->cur_rx[q];
                data[i++] = priv->cur_tx[q];
                data[i++] = priv->dirty_rx[q];
                data[i++] = priv->dirty_tx[q];
                data[i++] = stats->rx_packets;
                data[i++] = stats->tx_packets;
                data[i++] = stats->rx_bytes;
                data[i++] = stats->tx_bytes;
                data[i++] = stats->multicast;
                data[i++] = stats->rx_errors;
                data[i++] = stats->rx_crc_errors;
                data[i++] = stats->rx_frame_errors;
                data[i++] = stats->rx_length_errors;
                data[i++] = stats->rx_missed_errors;
                data[i++] = stats->rx_over_errors;
        }
}
static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
                break;
        }
}
static void ravb_get_ringparam(struct net_device *ndev,
                               struct ethtool_ringparam *ring)
{
        struct ravb_private *priv = netdev_priv(ndev);

        ring->rx_max_pending = BE_RX_RING_MAX;
        ring->tx_max_pending = BE_TX_RING_MAX;
        ring->rx_pending = priv->num_rx_ring[RAVB_BE];
        ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}
static int ravb_set_ringparam(struct net_device *ndev,
                              struct ethtool_ringparam *ring)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int error;

        if (ring->tx_pending > BE_TX_RING_MAX ||
            ring->rx_pending > BE_RX_RING_MAX ||
            ring->tx_pending < BE_TX_RING_MIN ||
            ring->rx_pending < BE_RX_RING_MIN)
                return -EINVAL;
        if (ring->rx_mini_pending || ring->rx_jumbo_pending)
                return -EINVAL;

        if (netif_running(ndev)) {
                netif_device_detach(ndev);
                /* Stop PTP Clock driver */
                if (priv->chip_id == RCAR_GEN2)
                        ravb_ptp_stop(ndev);
                /* Wait for DMA stopping */
                error = ravb_stop_dma(ndev);
                if (error) {
                        netdev_err(ndev,
                                   "cannot set ringparam! Any AVB processes are still running?\n");
                        return error;
                }
                synchronize_irq(ndev->irq);

                /* Free all the skb's in the RX queue and the DMA buffers. */
                ravb_ring_free(ndev, RAVB_BE);
                ravb_ring_free(ndev, RAVB_NC);
        }

        /* Set new parameters */
        priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
        priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

        if (netif_running(ndev)) {
                error = ravb_dmac_init(ndev);
                if (error) {
                        netdev_err(ndev,
                                   "%s: ravb_dmac_init() failed, error %d\n",
                                   __func__, error);
                        return error;
                }

                ravb_emac_init(ndev);

                /* Initialise PTP Clock driver */
                if (priv->chip_id == RCAR_GEN2)
                        ravb_ptp_init(ndev, priv->pdev);

                netif_device_attach(ndev);
        }

        return 0;
}
static int ravb_get_ts_info(struct net_device *ndev,
                            struct ethtool_ts_info *info)
{
        struct ravb_private *priv = netdev_priv(ndev);

        info->so_timestamping =
                SOF_TIMESTAMPING_TX_SOFTWARE |
                SOF_TIMESTAMPING_RX_SOFTWARE |
                SOF_TIMESTAMPING_SOFTWARE |
                SOF_TIMESTAMPING_TX_HARDWARE |
                SOF_TIMESTAMPING_RX_HARDWARE |
                SOF_TIMESTAMPING_RAW_HARDWARE;
        info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
        info->rx_filters =
                (1 << HWTSTAMP_FILTER_NONE) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
                (1 << HWTSTAMP_FILTER_ALL);
        info->phc_index = ptp_clock_index(priv->ptp.clock);

        return 0;
}
static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct ravb_private *priv = netdev_priv(ndev);

        wol->supported = WAKE_MAGIC;
        wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
}

static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct ravb_private *priv = netdev_priv(ndev);

        if (wol->wolopts & ~WAKE_MAGIC)
                return -EOPNOTSUPP;

        priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);

        device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);

        return 0;
}
static const struct ethtool_ops ravb_ethtool_ops = {
        .nway_reset             = phy_ethtool_nway_reset,
        .get_msglevel           = ravb_get_msglevel,
        .set_msglevel           = ravb_set_msglevel,
        .get_link               = ethtool_op_get_link,
        .get_strings            = ravb_get_strings,
        .get_ethtool_stats      = ravb_get_ethtool_stats,
        .get_sset_count         = ravb_get_sset_count,
        .get_ringparam          = ravb_get_ringparam,
        .set_ringparam          = ravb_set_ringparam,
        .get_ts_info            = ravb_get_ts_info,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
        .get_wol                = ravb_get_wol,
        .set_wol                = ravb_set_wol,
};
static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
                                struct net_device *ndev, struct device *dev,
                                const char *ch)
{
        char *name;
        int error;

        name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
        if (!name)
                return -ENOMEM;
        error = request_irq(irq, handler, 0, name, ndev);
        if (error)
                netdev_err(ndev, "cannot request IRQ %s\n", name);

        return error;
}
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct platform_device *pdev = priv->pdev;
        struct device *dev = &pdev->dev;
        int error;

        napi_enable(&priv->napi[RAVB_BE]);
        napi_enable(&priv->napi[RAVB_NC]);

        if (priv->chip_id == RCAR_GEN2) {
                error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
                                    ndev->name, ndev);
                if (error) {
                        netdev_err(ndev, "cannot request IRQ\n");
                        goto out_napi_off;
                }
        } else {
                error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
                                      dev, "ch22:multi");
                if (error)
                        goto out_napi_off;
                error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
                                      dev, "ch24:emac");
                if (error)
                        goto out_free_irq;
                error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
                                      ndev, dev, "ch0:rx_be");
                if (error)
                        goto out_free_irq_emac;
                error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
                                      ndev, dev, "ch18:tx_be");
                if (error)
                        goto out_free_irq_be_rx;
                error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
                                      ndev, dev, "ch1:rx_nc");
                if (error)
                        goto out_free_irq_be_tx;
                error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
                                      ndev, dev, "ch19:tx_nc");
                if (error)
                        goto out_free_irq_nc_rx;
        }

        /* Device init */
        error = ravb_dmac_init(ndev);
        if (error)
                goto out_free_irq_nc_tx;
        ravb_emac_init(ndev);

        /* Initialise PTP Clock driver */
        if (priv->chip_id == RCAR_GEN2)
                ravb_ptp_init(ndev, priv->pdev);

        netif_tx_start_all_queues(ndev);

        /* PHY control start */
        error = ravb_phy_start(ndev);
        if (error)
                goto out_ptp_stop;

        return 0;

out_ptp_stop:
        /* Stop PTP Clock driver */
        if (priv->chip_id == RCAR_GEN2)
                ravb_ptp_stop(ndev);
out_free_irq_nc_tx:
        if (priv->chip_id == RCAR_GEN2)
                goto out_free_irq;
        free_irq(priv->tx_irqs[RAVB_NC], ndev);
out_free_irq_nc_rx:
        free_irq(priv->rx_irqs[RAVB_NC], ndev);
out_free_irq_be_tx:
        free_irq(priv->tx_irqs[RAVB_BE], ndev);
out_free_irq_be_rx:
        free_irq(priv->rx_irqs[RAVB_BE], ndev);
out_free_irq_emac:
        free_irq(priv->emac_irq, ndev);
out_free_irq:
        free_irq(ndev->irq, ndev);
out_napi_off:
        napi_disable(&priv->napi[RAVB_NC]);
        napi_disable(&priv->napi[RAVB_BE]);
        return error;
}
/* Timeout function for Ethernet AVB */
static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
        struct ravb_private *priv = netdev_priv(ndev);

        netif_err(priv, tx_err, ndev,
                  "transmit timed out, status %08x, resetting...\n",
                  ravb_read(ndev, ISS));

        /* tx_errors count up */
        ndev->stats.tx_errors++;

        schedule_work(&priv->work);
}
static void ravb_tx_timeout_work(struct work_struct *work)
{
        struct ravb_private *priv = container_of(work, struct ravb_private,
                                                 work);
        struct net_device *ndev = priv->ndev;

        netif_tx_stop_all_queues(ndev);

        /* Stop PTP Clock driver */
        if (priv->chip_id == RCAR_GEN2)
                ravb_ptp_stop(ndev);

        /* Wait for DMA stopping */
        ravb_stop_dma(ndev);

        ravb_ring_free(ndev, RAVB_BE);
        ravb_ring_free(ndev, RAVB_NC);

        /* Device init */
        ravb_dmac_init(ndev);
        ravb_emac_init(ndev);

        /* Initialise PTP Clock driver */
        if (priv->chip_id == RCAR_GEN2)
                ravb_ptp_init(ndev, priv->pdev);

        netif_tx_start_all_queues(ndev);
}
/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int num_tx_desc = priv->num_tx_desc;
        u16 q = skb_get_queue_mapping(skb);
        struct ravb_tstamp_skb *ts_skb;
        struct ravb_tx_desc *desc;
        unsigned long flags;
        dma_addr_t dma_addr;
        void *buffer;
        u32 entry;
        u32 len;

        spin_lock_irqsave(&priv->lock, flags);
        if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
            num_tx_desc) {
                netif_err(priv, tx_queued, ndev,
                          "still transmitting with the full ring!\n");
                netif_stop_subqueue(ndev, q);
                spin_unlock_irqrestore(&priv->lock, flags);
                return NETDEV_TX_BUSY;
        }

        if (skb_put_padto(skb, ETH_ZLEN))
                goto exit;

        entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
        priv->tx_skb[q][entry / num_tx_desc] = skb;

        if (num_tx_desc > 1) {
                buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
                         entry / num_tx_desc * DPTR_ALIGN;
                len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;

                /* Zero length DMA descriptors are problematic as they seem
                 * to terminate DMA transfers. Avoid them by simply using a
                 * length of DPTR_ALIGN (4) when skb data is aligned to
                 * DPTR_ALIGN.
                 *
                 * As skb is guaranteed to have at least ETH_ZLEN (60)
                 * bytes of data by the call to skb_put_padto() above this
                 * is safe with respect to both the length of the first DMA
                 * descriptor (len) overflowing the available data and the
                 * length of the second DMA descriptor (skb->len - len)
                 * being negative.
                 */
                if (len == 0)
                        len = DPTR_ALIGN;

                memcpy(buffer, skb->data, len);
                dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(ndev->dev.parent, dma_addr))
                        goto drop;

                desc = &priv->tx_ring[q][entry];
                desc->ds_tagl = cpu_to_le16(len);
                desc->dptr = cpu_to_le32(dma_addr);

                buffer = skb->data + len;
                len = skb->len - len;
                dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(ndev->dev.parent, dma_addr))
                        goto unmap;

                desc++;
        } else {
                desc = &priv->tx_ring[q][entry];
                len = skb->len;
                dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(ndev->dev.parent, dma_addr))
                        goto drop;
        }
        desc->ds_tagl = cpu_to_le16(len);
        desc->dptr = cpu_to_le32(dma_addr);

        /* TX timestamp required */
        if (q == RAVB_NC) {
                ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
                if (!ts_skb) {
                        if (num_tx_desc > 1) {
                                desc--;
                                dma_unmap_single(ndev->dev.parent, dma_addr,
                                                 len, DMA_TO_DEVICE);
                        }
                        goto unmap;
                }
                ts_skb->skb = skb_get(skb);
                ts_skb->tag = priv->ts_skb_tag++;
                priv->ts_skb_tag &= 0x3ff;
                list_add_tail(&ts_skb->list, &priv->ts_skb_list);

                /* TAG and timestamp required flag */
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
                desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
        }

        skb_tx_timestamp(skb);
        /* Descriptor type must be set after all the above writes */
        dma_wmb();
        if (num_tx_desc > 1) {
                desc->die_dt = DT_FEND;
                desc--;
                desc->die_dt = DT_FSTART;
        } else {
                desc->die_dt = DT_FSINGLE;
        }
        ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);

        priv->cur_tx[q] += num_tx_desc;
        if (priv->cur_tx[q] - priv->dirty_tx[q] >
            (priv->num_tx_ring[q] - 1) * num_tx_desc &&
            !ravb_tx_free(ndev, q, true))
                netif_stop_subqueue(ndev, q);

exit:
        spin_unlock_irqrestore(&priv->lock, flags);
        return NETDEV_TX_OK;

unmap:
        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
                         le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
        dev_kfree_skb_any(skb);
        priv->tx_skb[q][entry / num_tx_desc] = NULL;
        goto exit;
}
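
/* Note: on Gen3 each packet is sent as a DT_FSTART/DT_FEND descriptor
 * pair; the first descriptor points at a 4-byte-aligned bounce copy of the
 * head of the frame and the second at the remainder of the skb.  The
 * 10-bit timestamp tag is split across two fields: the upper six bits
 * travel in tagh_tsr and the lower four are folded into the top of
 * ds_tagl, above the length.
 */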
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
                             struct net_device *sb_dev)
{
        /* If skb needs TX timestamp, it is handled in network control queue */
        return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
                                                               RAVB_BE;
}
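
/* Note: only the network control (NC) queue delivers hardware TX
 * timestamps, which is why any skb flagged for hardware timestamping is
 * steered there while all other traffic uses the best-effort (BE) queue.
 */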
static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct net_device_stats *nstats, *stats0, *stats1;

        nstats = &ndev->stats;
        stats0 = &priv->stats[RAVB_BE];
        stats1 = &priv->stats[RAVB_NC];

        if (priv->chip_id == RCAR_GEN3) {
                nstats->tx_dropped += ravb_read(ndev, TROCR);
                ravb_write(ndev, 0, TROCR);     /* (write clear) */
        }

        nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
        nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
        nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
        nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
        nstats->multicast = stats0->multicast + stats1->multicast;
        nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
        nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
        nstats->rx_frame_errors =
                stats0->rx_frame_errors + stats1->rx_frame_errors;
        nstats->rx_length_errors =
                stats0->rx_length_errors + stats1->rx_length_errors;
        nstats->rx_missed_errors =
                stats0->rx_missed_errors + stats1->rx_missed_errors;
        nstats->rx_over_errors =
                stats0->rx_over_errors + stats1->rx_over_errors;

        return nstats;
}
/* Update promiscuous bit */
static void ravb_set_rx_mode(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        ravb_modify(ndev, ECMR, ECMR_PRM,
                    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
        spin_unlock_irqrestore(&priv->lock, flags);
}
/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
        struct device_node *np = ndev->dev.parent->of_node;
        struct ravb_private *priv = netdev_priv(ndev);
        struct ravb_tstamp_skb *ts_skb, *ts_skb2;

        netif_tx_stop_all_queues(ndev);

        /* Disable interrupts by clearing the interrupt masks. */
        ravb_write(ndev, 0, RIC0);
        ravb_write(ndev, 0, RIC2);
        ravb_write(ndev, 0, TIC);

        /* Stop PTP Clock driver */
        if (priv->chip_id == RCAR_GEN2)
                ravb_ptp_stop(ndev);

        /* Set the config mode to stop the AVB-DMAC's processes */
        if (ravb_stop_dma(ndev) < 0)
                netdev_err(ndev,
                           "device will be stopped after h/w processes are done.\n");

        /* Clear the timestamp list */
        list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
                list_del(&ts_skb->list);
                kfree_skb(ts_skb->skb);
                kfree(ts_skb);
        }

        /* PHY disconnect */
        if (ndev->phydev) {
                phy_stop(ndev->phydev);
                phy_disconnect(ndev->phydev);
                if (of_phy_is_fixed_link(np))
                        of_phy_deregister_fixed_link(np);
        }

        if (priv->chip_id != RCAR_GEN2) {
                free_irq(priv->tx_irqs[RAVB_NC], ndev);
                free_irq(priv->rx_irqs[RAVB_NC], ndev);
                free_irq(priv->tx_irqs[RAVB_BE], ndev);
                free_irq(priv->rx_irqs[RAVB_BE], ndev);
                free_irq(priv->emac_irq, ndev);
        }
        free_irq(ndev->irq, ndev);

        napi_disable(&priv->napi[RAVB_NC]);
        napi_disable(&priv->napi[RAVB_BE]);

        /* Free all the skb's in the RX queue and the DMA buffers. */
        ravb_ring_free(ndev, RAVB_BE);
        ravb_ring_free(ndev, RAVB_NC);

        return 0;
}
static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct hwtstamp_config config;

        config.flags = 0;
        config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
                                                HWTSTAMP_TX_OFF;
        if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
        else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
                config.rx_filter = HWTSTAMP_FILTER_ALL;
        else
                config.rx_filter = HWTSTAMP_FILTER_NONE;

        return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}
/* Control hardware time stamping */
static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct hwtstamp_config config;
        u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
        u32 tstamp_tx_ctrl;

        if (copy_from_user(&config, req->ifr_data, sizeof(config)))
                return -EFAULT;

        /* Reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                tstamp_tx_ctrl = 0;
                break;
        case HWTSTAMP_TX_ON:
                tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tstamp_rx_ctrl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
                break;
        default:
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
        }

        priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
        priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

        return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}
/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
        struct phy_device *phydev = ndev->phydev;

        if (!netif_running(ndev))
                return -EINVAL;

        if (!phydev)
                return -ENODEV;

        switch (cmd) {
        case SIOCGHWTSTAMP:
                return ravb_hwtstamp_get(ndev, req);
        case SIOCSHWTSTAMP:
                return ravb_hwtstamp_set(ndev, req);
        }

        return phy_mii_ioctl(phydev, req, cmd);
}
static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct ravb_private *priv = netdev_priv(ndev);

        ndev->mtu = new_mtu;

        if (netif_running(ndev)) {
                synchronize_irq(priv->emac_irq);
                ravb_emac_init(ndev);
        }

        netdev_update_features(ndev);

        return 0;
}
static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
{
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        /* Disable TX and RX */
        ravb_rcv_snd_disable(ndev);

        /* Modify RX Checksum setting */
        ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);

        /* Enable TX and RX */
        ravb_rcv_snd_enable(ndev);

        spin_unlock_irqrestore(&priv->lock, flags);
}

static int ravb_set_features(struct net_device *ndev,
                             netdev_features_t features)
{
        netdev_features_t changed = ndev->features ^ features;

        if (changed & NETIF_F_RXCSUM)
                ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);

        ndev->features = features;

        return 0;
}
static const struct net_device_ops ravb_netdev_ops = {
        .ndo_open               = ravb_open,
        .ndo_stop               = ravb_close,
        .ndo_start_xmit         = ravb_start_xmit,
        .ndo_select_queue       = ravb_select_queue,
        .ndo_get_stats          = ravb_get_stats,
        .ndo_set_rx_mode        = ravb_set_rx_mode,
        .ndo_tx_timeout         = ravb_tx_timeout,
        .ndo_do_ioctl           = ravb_do_ioctl,
        .ndo_change_mtu         = ravb_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_set_features       = ravb_set_features,
};
/* MDIO bus init function */
static int ravb_mdio_init(struct ravb_private *priv)
{
        struct platform_device *pdev = priv->pdev;
        struct device *dev = &pdev->dev;
        int error;

        /* Bitbang init */
        priv->mdiobb.ops = &bb_ops;

        /* MII controller setting */
        priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
        if (!priv->mii_bus)
                return -ENOMEM;

        /* Hook up MII support for ethtool */
        priv->mii_bus->name = "ravb_mii";
        priv->mii_bus->parent = dev;
        snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                 pdev->name, pdev->id);

        /* Register MDIO bus */
        error = of_mdiobus_register(priv->mii_bus, dev->of_node);
        if (error)
                goto out_free_bus;

        return 0;

out_free_bus:
        free_mdio_bitbang(priv->mii_bus);
        return error;
}

/* MDIO bus release function */
static int ravb_mdio_release(struct ravb_private *priv)
{
        /* Unregister mdio bus */
        mdiobus_unregister(priv->mii_bus);

        /* Free bitbang info */
        free_mdio_bitbang(priv->mii_bus);

        return 0;
}
static const struct of_device_id ravb_match_table[] = {
        { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
        { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
        { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 },
        { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
        { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 },
        { }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
static int ravb_set_gti(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct device *dev = ndev->dev.parent;
        unsigned long rate;
        uint64_t inc;

        rate = clk_get_rate(priv->clk);
        if (!rate)
                return -EINVAL;

        inc = 1000000000ULL << 20;
        do_div(inc, rate);

        if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
                dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
                        inc, GTI_TIV_MIN, GTI_TIV_MAX);
                return -EINVAL;
        }

        ravb_write(ndev, inc, GTI);

        return 0;
}
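
/* Note: GTI.TIV is the gPTP timer increment per peripheral clock cycle,
 * expressed as nanoseconds in fixed point with 20 fractional bits, hence
 * inc = (10^9 << 20) / rate above.  For example, a (hypothetical) 125 MHz
 * clock has an 8 ns period, giving inc = 8 << 20 = 0x800000.
 */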
static void ravb_set_config_mode(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);

        if (priv->chip_id == RCAR_GEN2) {
                ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
                /* Set CSEL value */
                ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
        } else {
                ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
                            CCC_GAC | CCC_CSEL_HPB);
        }
}
static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
        { .soc_id = "r8a774c0" },
        { .soc_id = "r8a77990" },
        { .soc_id = "r8a77995" },
        { /* sentinel */ }
};
/* Set tx and rx clock internal delay modes */
static void ravb_set_delay_mode(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int set = 0;

        if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
            priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
                set |= APSR_DM_RDM;

        if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
            priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
                if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
                          "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
                          phy_modes(priv->phy_interface)))
                        set |= APSR_DM_TDM;
        }

        ravb_modify(ndev, APSR, APSR_DM, set);
}
static int ravb_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct ravb_private *priv;
        enum ravb_chip_id chip_id;
        struct net_device *ndev;
        int error, irq, q;
        struct resource *res;
        int i;

        if (!np) {
                dev_err(&pdev->dev,
                        "this driver is required to be instantiated from device tree\n");
                return -EINVAL;
        }

        /* Get base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "invalid resource\n");
                return -EINVAL;
        }

        ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
                                  NUM_TX_QUEUE, NUM_RX_QUEUE);
        if (!ndev)
                return -ENOMEM;

        ndev->features = NETIF_F_RXCSUM;
        ndev->hw_features = NETIF_F_RXCSUM;

        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);

        /* The Ether-specific entries in the device structure. */
        ndev->base_addr = res->start;

        chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);

        if (chip_id == RCAR_GEN3)
                irq = platform_get_irq_byname(pdev, "ch22");
        else
                irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                error = irq;
                goto out_release;
        }
        ndev->irq = irq;

        SET_NETDEV_DEV(ndev, &pdev->dev);

        priv = netdev_priv(ndev);
        priv->ndev = ndev;
        priv->pdev = pdev;
        priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
        priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
        priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
        priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
        priv->addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->addr)) {
                error = PTR_ERR(priv->addr);
                goto out_release;
        }

        spin_lock_init(&priv->lock);
        INIT_WORK(&priv->work, ravb_tx_timeout_work);

        error = of_get_phy_mode(np, &priv->phy_interface);
        if (error && error != -ENODEV)
                goto out_release;

        priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
        priv->avb_link_active_low =
                of_property_read_bool(np, "renesas,ether-link-active-low");

        if (chip_id == RCAR_GEN3) {
                irq = platform_get_irq_byname(pdev, "ch24");
                if (irq < 0) {
                        error = irq;
                        goto out_release;
                }
                priv->emac_irq = irq;
                for (i = 0; i < NUM_RX_QUEUE; i++) {
                        irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
                        if (irq < 0) {
                                error = irq;
                                goto out_release;
                        }
                        priv->rx_irqs[i] = irq;
                }
                for (i = 0; i < NUM_TX_QUEUE; i++) {
                        irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
                        if (irq < 0) {
                                error = irq;
                                goto out_release;
                        }
                        priv->tx_irqs[i] = irq;
                }
        }

        priv->chip_id = chip_id;

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk)) {
                error = PTR_ERR(priv->clk);
                goto out_release;
        }

        ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
        ndev->min_mtu = ETH_MIN_MTU;

        priv->num_tx_desc = chip_id == RCAR_GEN2 ?
                NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;

        /* Set function */
        ndev->netdev_ops = &ravb_netdev_ops;
        ndev->ethtool_ops = &ravb_ethtool_ops;

        /* Set AVB config mode */
        ravb_set_config_mode(ndev);

        /* Set GTI value */
        error = ravb_set_gti(ndev);
        if (error)
                goto out_release;

        /* Request GTI loading */
        ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);

        if (priv->chip_id != RCAR_GEN2)
                ravb_set_delay_mode(ndev);

        /* Allocate descriptor base address table */
        priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
        priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
                                            &priv->desc_bat_dma, GFP_KERNEL);
        if (!priv->desc_bat) {
                dev_err(&pdev->dev,
                        "Cannot allocate desc base address table (size %d bytes)\n",
                        priv->desc_bat_size);
                error = -ENOMEM;
                goto out_release;
        }
        for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
                priv->desc_bat[q].die_dt = DT_EOS;
        ravb_write(ndev, priv->desc_bat_dma, DBAT);

        /* Initialise HW timestamp list */
        INIT_LIST_HEAD(&priv->ts_skb_list);

        /* Initialise PTP Clock driver */
        if (chip_id != RCAR_GEN2)
                ravb_ptp_init(ndev, pdev);

        /* Debug message level */
        priv->msg_enable = RAVB_DEF_MSG_ENABLE;

        /* Read and set MAC address */
        ravb_read_mac_address(ndev, of_get_mac_address(np));
        if (!is_valid_ether_addr(ndev->dev_addr)) {
                dev_warn(&pdev->dev,
                         "no valid MAC address supplied, using a random one\n");
                eth_hw_addr_random(ndev);
        }

        /* MDIO bus init */
        error = ravb_mdio_init(priv);
        if (error) {
                dev_err(&pdev->dev, "failed to initialize MDIO\n");
                goto out_dma_free;
        }

        netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
        netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);

        /* Network device register */
        error = register_netdev(ndev);
        if (error)
                goto out_napi_del;

        device_set_wakeup_capable(&pdev->dev, 1);

        /* Print device information */
        netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
                    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

        platform_set_drvdata(pdev, ndev);

        return 0;

out_napi_del:
        netif_napi_del(&priv->napi[RAVB_NC]);
        netif_napi_del(&priv->napi[RAVB_BE]);
        ravb_mdio_release(priv);
out_dma_free:
        dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
                          priv->desc_bat_dma);

        /* Stop PTP Clock driver */
        if (chip_id != RCAR_GEN2)
                ravb_ptp_stop(ndev);
out_release:
        free_netdev(ndev);

        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return error;
}
static int ravb_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct ravb_private *priv = netdev_priv(ndev);

        /* Stop PTP Clock driver */
        if (priv->chip_id != RCAR_GEN2)
                ravb_ptp_stop(ndev);

        dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
                          priv->desc_bat_dma);
        /* Set reset mode */
        ravb_write(ndev, CCC_OPC_RESET, CCC);
        pm_runtime_put_sync(&pdev->dev);
        unregister_netdev(ndev);
        netif_napi_del(&priv->napi[RAVB_NC]);
        netif_napi_del(&priv->napi[RAVB_BE]);
        ravb_mdio_release(priv);
        pm_runtime_disable(&pdev->dev);
        free_netdev(ndev);
        platform_set_drvdata(pdev, NULL);

        return 0;
}
static int ravb_wol_setup(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);

        /* Disable interrupts by clearing the interrupt masks. */
        ravb_write(ndev, 0, RIC0);
        ravb_write(ndev, 0, RIC2);
        ravb_write(ndev, 0, TIC);

        /* Only allow ECI interrupts */
        synchronize_irq(priv->emac_irq);
        napi_disable(&priv->napi[RAVB_NC]);
        napi_disable(&priv->napi[RAVB_BE]);
        ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);

        /* Enable MagicPacket */
        ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);

        return enable_irq_wake(priv->emac_irq);
}
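
/* Note: for Wake-on-LAN only the E-MAC is left alive.  All DMA interrupts
 * are masked, NAPI is stopped, and ECSIPR is narrowed to the Magic Packet
 * detection bit so the ECI interrupt can wake the system when a Magic
 * Packet arrives.
 */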
static int ravb_wol_restore(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int ret;

        napi_enable(&priv->napi[RAVB_NC]);
        napi_enable(&priv->napi[RAVB_BE]);

        /* Disable MagicPacket */
        ravb_modify(ndev, ECMR, ECMR_MPDE, 0);

        ret = ravb_close(ndev);
        if (ret < 0)
                return ret;

        return disable_irq_wake(priv->emac_irq);
}
static int __maybe_unused ravb_suspend(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct ravb_private *priv = netdev_priv(ndev);
        int ret;

        if (!netif_running(ndev))
                return 0;

        netif_device_detach(ndev);

        if (priv->wol_enabled)
                ret = ravb_wol_setup(ndev);
        else
                ret = ravb_close(ndev);

        return ret;
}
static int __maybe_unused ravb_resume(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct ravb_private *priv = netdev_priv(ndev);
        int ret = 0;

        /* If WoL is enabled set reset mode to rearm the WoL logic */
        if (priv->wol_enabled)
                ravb_write(ndev, CCC_OPC_RESET, CCC);

        /* All registers have been reset to default values.
         * Restore all registers which were set up at probe time and
         * reopen the device if it was running before the system suspended.
         */

        /* Set AVB config mode */
        ravb_set_config_mode(ndev);

        /* Set GTI value */
        ret = ravb_set_gti(ndev);
        if (ret)
                return ret;

        /* Request GTI loading */
        ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);

        if (priv->chip_id != RCAR_GEN2)
                ravb_set_delay_mode(ndev);

        /* Restore descriptor base address table */
        ravb_write(ndev, priv->desc_bat_dma, DBAT);

        if (netif_running(ndev)) {
                if (priv->wol_enabled) {
                        ret = ravb_wol_restore(ndev);
                        if (ret)
                                return ret;
                }
                ret = ravb_open(ndev);
                if (ret < 0)
                        return ret;
                netif_device_attach(ndev);
        }

        return ret;
}
static int __maybe_unused ravb_runtime_nop(struct device *dev)
{
        /* Runtime PM callback shared between ->runtime_suspend()
         * and ->runtime_resume(). Simply returns success.
         *
         * This driver re-initializes all registers after
         * pm_runtime_get_sync() anyway so there is no need
         * to save and restore registers here.
         */
        return 0;
}

static const struct dev_pm_ops ravb_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
        SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};
static struct platform_driver ravb_driver = {
        .probe          = ravb_probe,
        .remove         = ravb_remove,
        .driver = {
                .name           = "ravb",
                .pm             = &ravb_dev_pm_ops,
                .of_match_table = ravb_match_table,
        },
};

module_platform_driver(ravb_driver);

MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");