// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include "rswitch.h"

static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val,
					 (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

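/* Illustrative note: the mode-change helpers below use this to poll a
 * status register until the hardware acknowledges a request, e.g.
 *
 *	err = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);
 *
 * which spins in 1 us steps for at most RSWITCH_TIMEOUT_US and returns
 * -ETIMEDOUT if (GWMS & GWMS_OPS_MASK) never reads back as the mode.
 */
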
static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

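/* Illustrative read-modify-write example: clearing one field while setting
 * another in a single register, as rswitch_etha_enable_mii() does below:
 *
 *	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
 *		       MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
 */
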
/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr,
					   unsigned int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (!(val & RCEC_RCE))
		return false;

	return (val & BIT(port)) ? true : false;
}

static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port,
				     int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

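/* RCEC and RCDC form an enable/disable pair: writing BIT(port) | RCEC_RCE
 * to RCEC ungates an agent's clock, writing BIT(port) to RCDC gates it off
 * again. rswitch_agent_clock_is_enabled() reads RCEC back so mode changes
 * can ungate the clock on demand before touching agent registers.
 */
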
static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	unsigned int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	unsigned int i;

	/* For ETHA */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}

/* Gateway CPU agent block (GWCA) */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	unsigned int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	unsigned int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv,
				    unsigned int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv,
				 unsigned int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq,
					     bool cur, unsigned int num)
{
	unsigned int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

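/* Illustrative wrap-around arithmetic: with ring_size == 8, cur == 6 and
 * num == 3, the next index is (6 + 3) % 8 == 1; with cur == 2 it is simply
 * 2 + 3 == 5. All descriptor rings are circular buffers indexed modulo
 * ring_size.
 */
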
static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;

	return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
					   unsigned int start_index,
					   unsigned int num)
{
	unsigned int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->rx_bufs[index])
			continue;
		gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
		if (!gq->rx_bufs[index])
			goto err;
	}

	return 0;

err:
	for (; i-- > 0; ) {
		index = (i + start_index) % gq->ring_size;
		skb_free_frag(gq->rx_bufs[index]);
		gq->rx_bufs[index] = NULL;
	}

	return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	unsigned int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			skb_free_frag(gq->rx_bufs[i]);
		kfree(gq->rx_bufs);
		gq->rx_bufs = NULL;
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
		kfree(gq->skbs);
		gq->skbs = NULL;
		kfree(gq->unmap_addrs);
		gq->unmap_addrs = NULL;
	}
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}

static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, unsigned int ring_size)
{
	unsigned int i, bit;

	gq->dir_tx = dir_tx;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	if (!dir_tx) {
		gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
		if (!gq->rx_bufs)
			return -ENOMEM;
		if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
			goto out;

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
		if (!gq->skbs)
			return -ENOMEM;
		gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
		if (!gq->unmap_addrs)
			goto out;
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	}

	if (!gq->rx_ring && !gq->tx_ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}

static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}

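/* Descriptor pointers are 40 bits wide: the low 32 bits live in dptrl and
 * the top 8 bits in dptrh, matching the DMA_BIT_MASK(40) requested in
 * renesas_eth_sw_probe(). rswitch_desc_get_dptr() reassembles the same
 * address so completed buffers can be unmapped.
 */
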
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	unsigned int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->rx_bufs[i] + RSWITCH_HEADROOM,
						  RSWITCH_MAP_BUF_SIZE,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		for (desc = gq->tx_ring; i-- > 0; desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       unsigned int start_index,
				       unsigned int num)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;
	unsigned int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
	}
}

static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  unsigned int start_index,
					  unsigned int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	unsigned int i, index;
	dma_addr_t dma_addr;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->rx_bufs[index] + RSWITCH_HEADROOM,
						  RSWITCH_MAP_BUF_SIZE,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);

			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (; i-- > 0; ) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->rx_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
		  GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}

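/* Ring layout note: each ring ends in a DT_LINKFIX descriptor pointing back
 * at the ring base, and the queue's slot in gwca.linkfix_table (programmed
 * into GWDCBAC0/1 by rswitch_gwca_hw_init()) points there too, so the DMA
 * engine can find and cycle every ring from the descriptor base table.
 */
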
static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	unsigned int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}

static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	if (!gq->ts_ring)
		return -ENOMEM;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	desc = &gq->ts_ring[gq->ring_size];
	desc->desc.die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	INIT_LIST_HEAD(&priv->gwca.ts_info_list);

	return 0;
}

static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	unsigned int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}

static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}

static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f),
		  priv->addr + GWMDNC);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

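/* The GWCA, like the ETHA below, is driven through a mode state machine:
 * DISABLE -> CONFIG, program registers while in CONFIG, then DISABLE ->
 * OPERATION, with rswitch_gwca_change_mode() polling GWMS until the mode
 * written to GWMC is reflected back.
 */
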
static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}

static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev,
					      struct rswitch_gwca_queue *gq,
					      struct rswitch_ext_ts_desc *desc)
{
	dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc);
	u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
	u8 die_dt = desc->desc.die_dt & DT_MASK;
	struct sk_buff *skb = NULL;

	dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE,
			 DMA_FROM_DEVICE);

	/* The RX descriptor order will be one of the following:
	 * - FSINGLE
	 * - FSTART -> FEND
	 * - FSTART -> FMID -> FEND
	 */

	/* Check whether the descriptor is unexpected order */
	switch (die_dt) {
	case DT_FSTART:
	case DT_FSINGLE:
		if (gq->skb_fstart) {
			dev_kfree_skb_any(gq->skb_fstart);
			gq->skb_fstart = NULL;
			ndev->stats.rx_dropped++;
		}
		break;
	case DT_FMID:
	case DT_FEND:
		if (!gq->skb_fstart) {
			ndev->stats.rx_dropped++;
			return NULL;
		}
		break;
	default:
		break;
	}

	/* Handle the descriptor */
	switch (die_dt) {
	case DT_FSTART:
	case DT_FSINGLE:
		skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
		if (skb) {
			skb_reserve(skb, RSWITCH_HEADROOM);
			skb_put(skb, pkt_len);
			gq->pkt_len = pkt_len;
			if (die_dt == DT_FSTART) {
				gq->skb_fstart = skb;
				skb = NULL;
			}
		}
		break;
	case DT_FMID:
	case DT_FEND:
		skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags,
				virt_to_page(gq->rx_bufs[gq->cur]),
				offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM,
				pkt_len, RSWITCH_BUF_SIZE);
		if (die_dt == DT_FEND) {
			skb = gq->skb_fstart;
			gq->skb_fstart = NULL;
		}
		gq->pkt_len += pkt_len;
		break;
	default:
		netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt);
		break;
	}

	return skb;
}

static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, ret;
	struct sk_buff *skb;
	unsigned int num;
	u32 get_ts;

	if (*quota <= 0)
		return true;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		dma_rmb();
		skb = rswitch_rx_handle_desc(ndev, gq, desc);
		if (!skb)
			goto out;

		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += gq->pkt_len;

out:
		gq->rx_bufs[gq->cur] = NULL;
		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];

		if (--boguscnt <= 0)
			break;
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - boguscnt;

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return 0;
}

static void rswitch_tx_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	struct sk_buff *skb;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->tx_ring[gq->dirty];
		if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		dma_rmb();
		skb = gq->skbs[gq->dirty];
		if (skb) {
			rdev->ndev->stats.tx_packets++;
			rdev->ndev->stats.tx_bytes += skb->len;
			dma_unmap_single(ndev->dev.parent,
					 gq->unmap_addrs[gq->dirty],
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
		}

		desc->desc.die_dt = DT_EEMPTY;
	}
}

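/* Accounting note: gq->cur is the producer index (next descriptor the
 * driver will hand to hardware) and gq->dirty the consumer index;
 * rswitch_tx_free() advances dirty over descriptors the hardware has
 * returned to DT_FEMPTY and stops at the first still-pending one.
 */
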
static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	unsigned long flags;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	if (napi_complete_done(napi, budget - quota)) {
		spin_lock_irqsave(&priv->lock, flags);
		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}

static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		spin_lock(&rdev->priv->lock);
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		spin_unlock(&rdev->priv->lock);
		__napi_schedule(&rdev->napi);
	}
}

static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	unsigned int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}

static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}

static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, irq, ret;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void rswitch_ts(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct rswitch_ts_desc *desc;
	struct timespec64 ts;
	unsigned int num;
	u32 tag, port;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
		dma_rmb();

		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));

		list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
			if (!(ts_info->port == port && ts_info->tag == tag))
				continue;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
			dev_consume_skb_irq(ts_info->skb);
			list_del(&ts_info->list);
			kfree(ts_info);
			break;
		}

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
}

static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;

	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
		rswitch_ts(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
	int irq;

	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
	if (irq < 0)
		return irq;

	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
}

/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}

static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >>  8) & 0xFF;
	mac[1] = (mrmac0 >>  0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >>  8) & 0xFF;
	mac[5] = (mrmac1 >>  0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}

static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 val;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->speed) {
	case 100:
		val = MPIC_LSC_100M;
		break;
	case 1000:
		val = MPIC_LSC_1G;
		break;
	case 2500:
		val = MPIC_LSC_2_5G;
		break;
	default:
		return;
	}

	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}

static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}

static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
				   int phyad, int devad, int regad, int data)
{
	int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
	u32 val;
	int ret;

	if (devad == 0xffffffff)
		return -ENODEV;

	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

	val = MPSM_PSME | MPSM_MFF_C45;
	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
	if (ret)
		return ret;

	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

	if (read) {
		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
		if (ret)
			return ret;

		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
	} else {
		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
			  etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
	}

	return ret;
}

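/* Illustrative MPSM bit layout for a clause-45 transfer, as derived from
 * the shifts above: bits [31:16] carry the register address or data,
 * [15:13] the operation code (pop), [12:8] the MMD (devad), [7:3] the PHY
 * address, with MPSM_PSME | MPSM_MFF_C45 in the low bits. A read is thus
 * two MPSM writes (address phase, then MDIO_READ_C45) and the result comes
 * back in the MPSM_PRD_MASK field.
 */
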
static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}

static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}

/* Call of_node_put(port) after done */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
	struct device_node *ports, *port;
	int err = 0;
	u32 index;

	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
	if (!ports)
		return NULL;

	for_each_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &index);
		if (err < 0) {
			port = NULL;
			goto out;
		}
		if (index == rdev->etha->index) {
			if (!of_device_is_available(port))
				port = NULL;
			break;
		}
	}

out:
	of_node_put(ports);

	return port;
}

*rdev
)
1265 return 0; /* ignored */
1267 err
= of_get_phy_mode(rdev
->np_port
, &rdev
->etha
->phy_interface
);
1271 err
= of_property_read_u32(rdev
->np_port
, "max-speed", &max_speed
);
1273 rdev
->etha
->speed
= max_speed
;
1277 /* if no "max-speed" property, let's use default speed */
1278 switch (rdev
->etha
->phy_interface
) {
1279 case PHY_INTERFACE_MODE_MII
:
1280 rdev
->etha
->speed
= SPEED_100
;
1282 case PHY_INTERFACE_MODE_SGMII
:
1283 rdev
->etha
->speed
= SPEED_1000
;
1285 case PHY_INTERFACE_MODE_USXGMII
:
1286 rdev
->etha
->speed
= SPEED_2500
;
static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}

static void rswitch_adjust_link(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != rdev->etha->link) {
		phy_print_status(phydev);
		if (phydev->link)
			phy_power_on(rdev->serdes);
		else if (rdev->serdes->power_count)
			phy_power_off(rdev->serdes);

		rdev->etha->link = phydev->link;

		if (!rdev->priv->etha_no_runtime_change &&
		    phydev->speed != rdev->etha->speed) {
			rdev->etha->speed = phydev->speed;

			rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
			phy_set_speed(rdev->serdes, rdev->etha->speed);
		}
	}
}

static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
{
	if (!rdev->priv->etha_no_runtime_change)
		return;

	switch (rdev->etha->speed) {
	case SPEED_2500:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
}

static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
	struct phy_device *phydev;
	struct device_node *phy;
	int err = -ENOENT;

	if (!rdev->np_port)
		return -ENODEV;

	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
	if (!phy)
		return -ENODEV;

	/* Set phydev->host_interfaces before calling of_phy_connect() to
	 * configure the PHY with the information of host_interfaces.
	 */
	phydev = of_phy_find_device(phy);
	if (!phydev)
		goto out;
	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
	phydev->mac_managed_pm = true;

	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
	if (!phydev)
		goto out;

	phy_set_max_speed(phydev, SPEED_2500);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	rswitch_phy_remove_link_mode(rdev, phydev);

	phy_attached_info(phydev);

	err = 0;
out:
	of_node_put(phy);

	return err;
}

static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
	if (rdev->ndev->phydev)
		phy_disconnect(rdev->ndev->phydev);
}

static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	int err;

	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
}

static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		if (rdev->priv->etha_no_runtime_change)
			rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phy_device_init(rdev);
	if (err < 0)
		goto err_phy_device_init;

	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
		goto err_serdes_phy_get;
	}

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
err_serdes_phy_get:
	rswitch_phy_device_deinit(rdev);

err_phy_device_init:
	rswitch_mii_unregister(rdev);

	return err;
}

1489 static void rswitch_ether_port_deinit_one(struct rswitch_device
*rdev
)
1491 rswitch_phy_device_deinit(rdev
);
1492 rswitch_mii_unregister(rdev
);
static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = phy_init(priv->rdev[i]->serdes);
		if (err)
			goto err_phy_init;
	}

	return 0;

err_phy_init:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		phy_exit(priv->rdev[i]->serdes);
	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	unsigned int i;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}

static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	unsigned long flags;

	phy_start(ndev->phydev);

	napi_enable(&rdev->napi);
	netif_start_queue(ndev);

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);

	return 0;
}

static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	unsigned long flags;

	netif_tx_stop_all_queues(ndev);
	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
		if (ts_info->port != rdev->port)
			continue;
		dev_kfree_skb_irq(ts_info->skb);
		list_del(&ts_info->list);
		kfree(ts_info);
	}

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	phy_stop(ndev->phydev);
	napi_disable(&rdev->napi);

	return 0;
}

static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
				       struct sk_buff *skb,
				       struct rswitch_ext_desc *desc)
{
	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct rswitch_gwca_ts_info *ts_info;

		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
		if (!ts_info)
			return false;

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		rdev->ts_tag++;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);

		ts_info->skb = skb_get(skb);
		ts_info->port = rdev->port;
		ts_info->tag = rdev->ts_tag;
		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);

		skb_tx_timestamp(skb);
	}

	return true;
}

static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
				 struct sk_buff *skb,
				 struct rswitch_ext_desc *desc,
				 dma_addr_t dma_addr, u16 len, u8 die_dt)
{
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(len);
	if (!rswitch_ext_desc_set_info1(rdev, skb, desc))
		return false;

	dma_wmb();

	desc->desc.die_dt = die_dt;

	return true;
}

static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
{
	if (nr_desc == 1)
		return DT_FSINGLE | DIE;
	if (!index)
		return DT_FSTART;
	if (nr_desc - 1 == index)
		return DT_FEND | DIE;

	return DT_FMID;
}

static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
{
	switch (die_dt & DT_MASK) {
	case DT_FSINGLE:
	case DT_FEND:
		return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
	case DT_FSTART:
	case DT_FMID:
		return RSWITCH_DESC_BUF_SIZE;
	default:
		return 0;
	}
}

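/* Worked example (assuming RSWITCH_DESC_BUF_SIZE is 2048 for the numbers):
 * a 3000-byte frame needs nr_desc == 2, giving index 0 DT_FSTART with 2048
 * bytes and index 1 DT_FEND | DIE with 3000 % 2048 == 952 bytes, while a
 * frame of exactly 2048 bytes stays one DT_FSINGLE | DIE descriptor of the
 * full buffer size (the ?: fallback above).
 */
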
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	dma_addr_t dma_addr, dma_addr_orig;
	netdev_tx_t ret = NETDEV_TX_OK;
	struct rswitch_ext_desc *desc;
	unsigned int i, nr_desc;
	u8 die_dt;
	u16 len;

	nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
		netif_stop_subqueue(ndev, 0);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
		goto err_kfree;

	gq->skbs[gq->cur] = skb;
	gq->unmap_addrs[gq->cur] = dma_addr_orig;

	/* DT_FSTART should be set at last. So, this is reverse order. */
	for (i = nr_desc; i-- > 0; ) {
		desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
		die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
		dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
		len = rswitch_ext_desc_get_len(die_dt, skb->len);
		if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
			goto err_unmap;
	}

	wmb();	/* gq->cur must be incremented after die_dt was set */

	gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;

err_unmap:
	dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);

err_kfree:
	dev_kfree_skb_any(skb);

	return ret;
}

static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

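/* Userspace sketch (standard SIOCSHWTSTAMP usage, nothing driver-specific):
 * to enable TX plus all-RX timestamping, fill a struct hwtstamp_config with
 * tx_type = HWTSTAMP_TX_ON and rx_filter = HWTSTAMP_FILTER_ALL, point
 * ifr.ifr_data at it and call ioctl(fd, SIOCSHWTSTAMP, &ifr); the fallback
 * above rewrites unsupported filters to HWTSTAMP_FILTER_ALL and reports
 * that back to the caller.
 */
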
static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;

	/* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2)) - 1.
	 * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply
	 * both the numerator and the denominator by 10.
	 */
	etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}

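/* Worked example (assuming a hypothetical 266.66 MHz peripheral clock):
 * psmcs = 266660000 / 100000 / 50 - 1 = 52, so MDC runs at roughly
 * clk / ((psmcs + 1) * 2) ~= 2.5 MHz, the target frequency named in the
 * comment above.
 */
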
static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;
	ndev->max_mtu = RSWITCH_MAX_MTU;
	ndev->min_mtu = ETH_MIN_MTU;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	of_node_put(rdev->np_port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	if (rdev->priv->gwca.speed < rdev->etha->speed)
		rdev->priv->gwca.speed = rdev->etha->speed;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

static int rswitch_init(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (; i-- > 0; )
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
				     clk_get_rate(priv->clk));
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}

static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
	{ /* Sentinel */ }
};

*pdev
)
2047 const struct soc_device_attribute
*attr
;
2048 struct rswitch_private
*priv
;
2049 struct resource
*res
;
2052 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "secure_base");
2054 dev_err(&pdev
->dev
, "invalid resource\n");
2058 priv
= devm_kzalloc(&pdev
->dev
, sizeof(*priv
), GFP_KERNEL
);
2061 spin_lock_init(&priv
->lock
);
2063 priv
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
2064 if (IS_ERR(priv
->clk
))
2065 return PTR_ERR(priv
->clk
);
2067 attr
= soc_device_match(rswitch_soc_no_speed_change
);
2069 priv
->etha_no_runtime_change
= true;
2071 priv
->ptp_priv
= rcar_gen4_ptp_alloc(pdev
);
2072 if (!priv
->ptp_priv
)
2075 platform_set_drvdata(pdev
, priv
);
2077 priv
->addr
= devm_ioremap_resource(&pdev
->dev
, res
);
2078 if (IS_ERR(priv
->addr
))
2079 return PTR_ERR(priv
->addr
);
2081 priv
->ptp_priv
->addr
= priv
->addr
+ RCAR_GEN4_GPTP_OFFSET_S4
;
2083 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(40));
2085 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
2090 priv
->gwca
.index
= AGENT_INDEX_GWCA
;
2091 priv
->gwca
.num_queues
= min(RSWITCH_NUM_PORTS
* NUM_QUEUES_PER_NDEV
,
2092 RSWITCH_MAX_NUM_QUEUES
);
2093 priv
->gwca
.queues
= devm_kcalloc(&pdev
->dev
, priv
->gwca
.num_queues
,
2094 sizeof(*priv
->gwca
.queues
), GFP_KERNEL
);
2095 if (!priv
->gwca
.queues
)
2098 pm_runtime_enable(&pdev
->dev
);
2099 pm_runtime_get_sync(&pdev
->dev
);
2101 ret
= rswitch_init(priv
);
2103 pm_runtime_put(&pdev
->dev
);
2104 pm_runtime_disable(&pdev
->dev
);
2108 device_set_wakeup_capable(&pdev
->dev
, 1);
static void rswitch_deinit(struct rswitch_private *priv)
{
	unsigned int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	rswitch_for_each_enabled_port(priv, i) {
		struct rswitch_device *rdev = priv->rdev[i];

		unregister_netdev(rdev->ndev);
		rswitch_ether_port_deinit_one(rdev);
		phy_exit(priv->rdev[i]->serdes);
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
}

*pdev
)
2139 struct rswitch_private
*priv
= platform_get_drvdata(pdev
);
2141 rswitch_deinit(priv
);
2143 pm_runtime_put(&pdev
->dev
);
2144 pm_runtime_disable(&pdev
->dev
);
2146 platform_set_drvdata(pdev
, NULL
);
static int renesas_eth_sw_suspend(struct device *dev)
{
	struct rswitch_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev;
	unsigned int i;

	rswitch_for_each_enabled_port(priv, i) {
		ndev = priv->rdev[i]->ndev;
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			rswitch_stop(ndev);
		}
		if (priv->rdev[i]->serdes->init_count)
			phy_exit(priv->rdev[i]->serdes);
	}

	return 0;
}

static int renesas_eth_sw_resume(struct device *dev)
{
	struct rswitch_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev;
	unsigned int i;

	rswitch_for_each_enabled_port(priv, i) {
		phy_init(priv->rdev[i]->serdes);
		ndev = priv->rdev[i]->ndev;
		if (netif_running(ndev)) {
			rswitch_open(ndev);
			netif_device_attach(ndev);
		}
	}

	return 0;
}

, renesas_eth_sw_suspend
,
2187 renesas_eth_sw_resume
);
2189 static struct platform_driver renesas_eth_sw_driver_platform
= {
2190 .probe
= renesas_eth_sw_probe
,
2191 .remove
= renesas_eth_sw_remove
,
2193 .name
= "renesas_eth_sw",
2194 .pm
= pm_sleep_ptr(&renesas_eth_sw_pm_ops
),
2195 .of_match_table
= renesas_eth_sw_of_table
,
2198 module_platform_driver(renesas_eth_sw_driver_platform
);
2199 MODULE_AUTHOR("Yoshihiro Shimoda");
2200 MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
2201 MODULE_LICENSE("GPL");