// SPDX-License-Identifier: GPL-2.0
/* SuperH Ethernet device driver
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2014 Renesas Solutions Corp.
 * Copyright (C) 2013-2017 Cogent Embedded, Inc.
 * Copyright (C) 2014 Codethink Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>
#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK | \
		 NETIF_MSG_TIMER | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

#define SH_ETH_OFFSET_INVALID	((u16)~0)

#define SH_ETH_OFFSET_DEFAULTS			\
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID

/* Use some intentionally tricky logic here to initialize the whole struct to
 * 0xffff, but then override certain fields, requiring us to indicate that we
 * "know" that there are overrides in this structure, and we'll need to
 * disable that warning from W=1 builds.  GCC has supported this option since
 * 4.2.X, but the macros available to do this are only defined for GCC 8 and
 * later.
 */
__diag_push();
__diag_ignore_all("-Woverride-init",
		  "logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TSU_ADRH0]	= 0x0100,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TSU_ADRH0]	= 0x0100,
};
__diag_pop();
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->addr + offset);
}
static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->addr + offset);
}
static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
			  u32 set)
{
	sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
		     enum_index);
}
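/* sh_eth_modify() is a simple read-modify-write helper over sh_eth_read()
 * and sh_eth_write().  For example, the call used later in this driver to
 * stop the MAC,
 *
 *	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
 *
 * reads ECMR, clears the RE/TE bits and writes the result back; passing the
 * same mask as both "clear" and "set" would instead set those bits.
 */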
static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index)
{
	return mdp->reg_offset[enum_index];
}
static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
			     int enum_index)
{
	u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->tsu_addr + offset);
}
static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
	u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->tsu_addr + offset);
}
static void sh_eth_soft_swap(char *src, int len)
{
#ifdef __LITTLE_ENDIAN
	u32 *p = (u32 *)src;
	u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32));

	for (; p < maxp; p++)
		*p = swab32(*p);
#endif
}
static void sh_eth_select_mii(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 value;

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID:
		value = 0x3;
		break;
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not setup. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
}
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
	mdelay(1);
}
static int sh_eth_soft_reset(struct net_device *ndev)
{
	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
	mdelay(3);
	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);

	return 0;
}
static int sh_eth_check_soft_reset(struct net_device *ndev)
{
	int cnt;

	for (cnt = 100; cnt > 0; cnt--) {
		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
			return 0;
		mdelay(1);
	}

	netdev_err(ndev, "Device reset failed\n");
	return -ETIMEDOUT;
}
static int sh_eth_soft_reset_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);

	ret = sh_eth_check_soft_reset(ndev);
	if (ret)
		return ret;

	/* Table Init */
	sh_eth_write(ndev, 0, TDLAR);
	sh_eth_write(ndev, 0, TDFAR);
	sh_eth_write(ndev, 0, TDFXR);
	sh_eth_write(ndev, 0, TDFFR);
	sh_eth_write(ndev, 0, RDLAR);
	sh_eth_write(ndev, 0, RDFAR);
	sh_eth_write(ndev, 0, RDFXR);
	sh_eth_write(ndev, 0, RDFFR);

	/* Reset HW CRC register */
	if (mdp->cd->csmr)
		sh_eth_write(ndev, 0, CSMR);

	/* Select MII mode */
	if (mdp->cd->select_mii)
		sh_eth_select_mii(ndev);

	return ret;
}
static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (WARN_ON(!mdp->cd->gecmr))
		return;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}
static struct sh_eth_cpu_data r7s72100_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
			  EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
			  EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,
};
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	sh_eth_chip_reset(ndev);

	sh_eth_select_mii(ndev);
}
static struct sh_eth_cpu_data r8a7740_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,
};
/* There is CPU dependent code */
static void sh_eth_set_rate_rcar(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
		break;
	case 100:/* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
		break;
	}
}
static struct sh_eth_cpu_data rcar_gen1_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,
};
/* R-Car Gen2 and RZ/G1 */
static struct sh_eth_cpu_data rcar_gen2_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.trscer_err_mask = TRSCER_RMAFCE,
};
static struct sh_eth_cpu_data r8a77980_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER |
			  EESR_TFE | EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,
};
static struct sh_eth_cpu_data r7s9210_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
			  EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
			  EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
			  EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
			  EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.fdr_value	= 0x0000070f,

	.trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,
};
#endif /* CONFIG_OF */
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
		break;
	case 100:/* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
		break;
	}
}
static struct sh_eth_cpu_data sh7724_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
};
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	}
}
static struct sh_eth_cpu_data sh7757_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.irq_flags	= IRQF_SHARED,
};
#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	u32 mahr[2], malr[2];
	int i;

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	sh_eth_chip_reset(ndev);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}
static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (WARN_ON(!mdp->cd->gecmr))
		return;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	}
}
/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000072f,

	.irq_flags	= IRQF_SHARED,
};
static struct sh_eth_cpu_data sh7734_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
};
static struct sh_eth_cpu_data sh7763_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.irq_flags	= IRQF_SHARED,
};
static struct sh_eth_cpu_data sh7619_data = {
	.soft_reset	= sh_eth_soft_reset,

	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,
};
static struct sh_eth_cpu_data sh771x_data = {
	.soft_reset	= sh_eth_soft_reset,

	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.trscer_err_mask = TRSCER_RMAFCE,
};
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
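/* For example, a dev_addr of 02:01:02:03:04:05 results in
 * MAHR = 0x02010203 and MALR = 0x00000405 with the writes above.
 */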
/* Get MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet device doesn't have a ROM for the MAC address.
 * This driver picks up the MAC address that was programmed by the
 * bootloader (U-Boot or sh-ipl+g); to use this device, the MAC address
 * must be set in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		eth_hw_addr_set(ndev, mac);
	} else {
		u32 mahr = sh_eth_read(ndev, MAHR);
		u32 malr = sh_eth_read(ndev, MALR);
		u8 addr[ETH_ALEN];

		addr[0] = (mahr >> 24) & 0xFF;
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >>  8) & 0xFF;
		addr[3] = (mahr >>  0) & 0xFF;
		addr[4] = (malr >>  8) & 0xFF;
		addr[5] = (malr >>  0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
};

static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	u32 pir;

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	pir = ioread32(bitbang->addr);
	if (set)
		pir |=  mask;
	else
		pir &= ~mask;
	iowrite32(pir, bitbang->addr);
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
}

static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
}

static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
}

/* mdio bus control struct */
static const struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
/* free Tx skb function */
static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry;
	bool sent;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
		if (sent_only && !sent)
			break;
		/* TACT bit must be checked before all the following reads */
		dma_rmb();
		netif_info(mdp, tx_done, ndev,
			   "tx entry %d status 0x%08x\n",
			   entry, le32_to_cpu(txdesc->status));
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&mdp->pdev->dev,
					 le32_to_cpu(txdesc->addr),
					 le32_to_cpu(txdesc->len) >> 16,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_le32(TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_le32(TD_TDLE);

		if (sent) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
		}
	}
	return free_num;
}
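/* cur_tx and dirty_tx are free-running counters; their difference is the
 * number of Tx descriptors not yet reclaimed, and "% num_tx_ring" maps a
 * counter back to a ring slot.  E.g. with num_tx_ring = 64, cur_tx = 130
 * and dirty_tx = 128 mean ring slots 0 and 1 are still outstanding.
 */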
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize, i;

	if (mdp->rx_ring) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i]) {
				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];

				dma_unmap_single(&mdp->pdev->dev,
						 le32_to_cpu(rxdesc->addr),
						 ALIGN(mdp->rx_buf_sz, 32),
						 DMA_FROM_DEVICE);
			}
		}
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	if (mdp->tx_ring) {
		sh_eth_tx_free(ndev, false);

		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}

	/* Free Tx skb ringbuffer */
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u32 buf_len;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (!skb)
			break;
		sh_eth_set_receive_align(skb);

		/* The size of the buffer is a multiple of 32 bytes. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->len = cpu_to_le32(buf_len << 16);
		rxdesc->addr = cpu_to_le32(dma_addr);
		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (mdp->cd->xdfar_rw)
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	if (rxdesc)
		rxdesc->status |= cpu_to_le32(RD_RDLE);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->len = cpu_to_le32(0);
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (mdp->cd->xdfar_rw)
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_le32(TD_TDLE);
}
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
				 GFP_KERNEL);
	if (!mdp->rx_skbuff)
		return -ENOMEM;

	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
				 GFP_KERNEL);
	if (!mdp->tx_skbuff)
		goto ring_free;

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
					  &mdp->rx_desc_dma, GFP_KERNEL);
	if (!mdp->rx_ring)
		goto ring_free;

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
					  &mdp->tx_desc_dma, GFP_KERNEL);
	if (!mdp->tx_ring)
		goto ring_free;
	return 0;

ring_free:
	/* Free Rx and Tx skb ring buffer and DMA buffer */
	sh_eth_ring_free(ndev);

	return -ENOMEM;
}
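/* Worked example of the sizing above: for the default MTU of 1500,
 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes per Rx buffer,
 * plus NET_IP_ALIGN when the controller pads via RPADIR.
 */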
static int sh_eth_dev_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	/* Soft Reset */
	ret = mdp->cd->soft_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);

	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

	/* DMA transfer burst mode */
	if (mdp->cd->nbst)
		sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST);

	/* Burst cycle count upper-limit */
	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_modify(ndev, EESR, 0, 0);
	mdp->irq_enabled = true;
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
	sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
		     (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
		     ECMR_TE | ECMR_RE, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	if (mdp->cd->apr)
		sh_eth_write(ndev, 1, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, 1, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	return ret;
}
static void sh_eth_dev_exit(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
		mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);

	/* Disable TX FIFO egress to MAC */
	sh_eth_rcv_snd_disable(ndev);

	/* Stop RX DMA at next packet boundary */
	sh_eth_write(ndev, 0, EDRRR);

	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
	 */
	msleep(2); /* max frame time at 10 Mbps < 1250 us */
	sh_eth_get_stats(ndev);
	mdp->cd->soft_reset(ndev);

	/* Set the RMII mode again if required */
	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Set MAC address again */
	update_mac_address(ndev);
}
static void sh_eth_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* The hardware checksum is 2 bytes appended to packet data */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));
}
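/* E.g. a 64-byte frame arrives as 66 bytes when ECMR_RCSC is enabled: the
 * trailing two bytes hold the checksum computed by the MAC, which is handed
 * to the stack as CHECKSUM_COMPLETE and then trimmed off the skb.
 */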
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 buf_len;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
		dma_rmb();
		desc_status = le32_to_cpu(rxdesc->status);
		pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;

		if (--boguscnt < 0)
			break;

		netif_info(mdp, rx_status, ndev,
			   "rx entry %d status 0x%08x len %d\n",
			   entry, desc_status, pkt_len);

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740 and R7S72100
		 * the RFS bits are from bit 25 to bit 16. So, the
		 * driver needs right shifting by 16.
		 */
		if (mdp->cd->csmr)
			desc_status >>= 16;

		skb = mdp->rx_skbuff[entry];
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else if (skb) {
			dma_addr = le32_to_cpu(rxdesc->addr);
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(dma_addr, 4)),
					pkt_len + 2);
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_unmap_single(&mdp->pdev->dev, dma_addr,
					 ALIGN(mdp->rx_buf_sz, 32),
					 DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			if (ndev->features & NETIF_F_RXCSUM)
				sh_eth_rx_csum(skb);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
			if (desc_status & RD_RFS8)
				ndev->stats.multicast++;
		}
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is 32 byte boundary. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		rxdesc->len = cpu_to_le32(buf_len << 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, skbuff_size);
			if (!skb)
				break;	/* Better luck next round. */
			sh_eth_set_receive_align(skb);
			dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
						  buf_len, DMA_FROM_DEVICE);
			if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
				kfree_skb(skb);
				break;
			}
			mdp->rx_skbuff[entry] = skb;

			skb_checksum_none_assert(skb);
			rxdesc->addr = cpu_to_le32(dma_addr);
		}
		dma_wmb(); /* RACT bit must be set after all the above writes */
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
		else
			rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	*quota -= limit - boguscnt - 1;

	return *quota <= 0;
}
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}
/* E-MAC interrupt handler */
static void sh_eth_emac_interrupt(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;

	felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
	sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
	if (felic_stat & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (felic_stat & ECSR_MPD)
		pm_wakeup_event(&mdp->pdev->dev, 0);
	if (felic_stat & ECSR_LCHNG) {
		/* Link Changed */
		if (mdp->cd->no_psr || mdp->no_ether_link)
			return;
		link_stat = sh_eth_read(ndev, PSR);
		if (mdp->ether_link_active_low)
			link_stat = ~link_stat;
		if (!(link_stat & PSR_LMON)) {
			sh_eth_rcv_snd_disable(ndev);
		} else {
			/* Link Up */
			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
			/* clear int */
			sh_eth_modify(ndev, ECSR, 0, 0);
			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
			/* enable tx and rx */
			sh_eth_rcv_snd_enable(ndev);
		}
	}
}
/* error control function */
static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 mask;

	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			   intr_status, mdp->cur_tx, mdp->dirty_tx,
			   (u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_tx_free(ndev, true);

		/* restart the transmitter if it has been stopped */
		if (edtrr ^ mdp->cd->edtrr_trns) {
			/* tx dma start */
			sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing the ECI interrupt to be
	 * always enabled since it's the one that comes through regardless of
	 * the mask, and we need to fully handle it in sh_eth_emac_interrupt()
	 * in order to quench it, as it doesn't get cleared by just writing 1
	 * to the ECI bit.
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | EESIPR_ECIIP;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
			   cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto out;

	if (unlikely(!mdp->irq_enabled)) {
		sh_eth_write(ndev, 0, EESIPR);
		goto out;
	}

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
				    intr_status, intr_enable);
		}
	}

	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_tx_free(ndev, true);
		netif_wake_queue(ndev);
	}

	/* E-MAC interrupt */
	if (intr_status & EESR_ECI)
		sh_eth_emac_interrupt(ndev);

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

out:
	spin_unlock(&mdp->lock);

	return ret;
}
static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	u32 intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	if (mdp->irq_enabled)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	unsigned long flags;
	int new_state = 0;

	spin_lock_irqsave(&mdp->lock, flags);

	/* Disable TX and RX right over here, if E-MAC change is ignored */
	if (mdp->cd->no_psr || mdp->no_ether_link)
		sh_eth_rcv_snd_disable(ndev);

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	/* Enable TX and RX right over here, if E-MAC change is ignored */
	if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
		sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev;

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	if (np) {
		struct device_node *pn;

		pn = of_parse_phandle(np, "phy-handle", 0);
		phydev = of_phy_connect(ndev, pn,
					sh_eth_adjust_link, 0,
					mdp->phy_interface);

		of_node_put(pn);
		if (!phydev)
			phydev = ERR_PTR(-ENOENT);
	} else {
		char phy_id[MII_BUS_ID_SIZE + 3];

		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 mdp->mii_bus->id, mdp->phy_id);

		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
				     mdp->phy_interface);
	}

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "failed to connect PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
		phy_set_max_speed(phydev, SPEED_100);

	phy_attached_info(phydev);

	return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	phy_start(ndev->phydev);

	return 0;
}
/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
 * version must be bumped as well.  Just adding registers up to that
 * limit is fine, as long as the existing register indices don't
 * change.
 */
#define SH_ETH_REG_DUMP_VERSION		1
#define SH_ETH_REG_DUMP_MAX_REGS	256

static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	u32 *valid_map;
	size_t len;

	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);

	/* Dump starts with a bitmap that tells ethtool which
	 * registers are defined for this chip.
	 */
	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
	if (buf) {
		valid_map = buf;
		buf += len;
	} else {
		valid_map = NULL;
	}

	/* Add a register to the dump, if it has a defined offset.
	 * This automatically skips most undefined registers, but for
	 * some it is also necessary to check a capability flag in
	 * struct sh_eth_cpu_data.
	 */
#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
#define add_reg_from(reg, read_expr) do {				\
		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
			if (buf) {					\
				mark_reg_valid(reg);			\
				*buf++ = read_expr;			\
			}						\
			++len;						\
		}							\
	} while (0)
#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))

	if (!cd->no_tx_cntrs) {
		add_reg(TROCR);
		add_reg(CDCR);
		add_reg(LCCR);
	}

	if (cd->tsu) {
		add_tsu_reg(TSU_CTRST);
		if (cd->dual_port) {
			add_tsu_reg(TSU_FWEN0);
			add_tsu_reg(TSU_FWEN1);
			add_tsu_reg(TSU_FCM);
			add_tsu_reg(TSU_BSYSL0);
			add_tsu_reg(TSU_BSYSL1);
			add_tsu_reg(TSU_PRISL0);
			add_tsu_reg(TSU_PRISL1);
			add_tsu_reg(TSU_FWSL0);
			add_tsu_reg(TSU_FWSL1);
		}
		add_tsu_reg(TSU_FWSLC);
		if (cd->dual_port) {
			add_tsu_reg(TSU_QTAGM0);
			add_tsu_reg(TSU_QTAGM1);
			add_tsu_reg(TSU_FWSR);
			add_tsu_reg(TSU_FWINMK);
			add_tsu_reg(TSU_ADQT0);
			add_tsu_reg(TSU_ADQT1);
			add_tsu_reg(TSU_VTAG0);
			add_tsu_reg(TSU_VTAG1);
		}
		add_tsu_reg(TSU_ADSBSY);
		add_tsu_reg(TSU_TEN);
		add_tsu_reg(TSU_POST1);
		add_tsu_reg(TSU_POST2);
		add_tsu_reg(TSU_POST3);
		add_tsu_reg(TSU_POST4);
		/* This is the start of a table, not just a single register. */
		if (buf) {
			unsigned int i;

			mark_reg_valid(TSU_ADRH0);
			for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
				*buf++ = ioread32(mdp->tsu_addr +
						  mdp->reg_offset[TSU_ADRH0] +
						  i * 4);
		}
		len += SH_ETH_TSU_CAM_ENTRIES * 2;
	}

#undef mark_reg_valid
#undef add_reg_from
#undef add_reg
#undef add_tsu_reg

	return len * 4;
}
static int sh_eth_get_regs_len(struct net_device *ndev)
{
	return __sh_eth_get_regs(ndev, NULL);
}

static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
			    void *buf)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	regs->version = SH_ETH_REG_DUMP_VERSION;

	pm_runtime_get_sync(&mdp->pdev->dev);
	__sh_eth_get_regs(ndev, buf);
	pm_runtime_put_sync(&mdp->pdev->dev);
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mdp->msg_enable = value;
}
static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));
		break;
	}
}
static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}
static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		netif_tx_disable(ndev);

		/* Serialise with the interrupt handler and NAPI, then
		 * disable interrupts.  We have to clear the
		 * irq_enabled flag first to ensure that interrupts
		 * won't be re-enabled.
		 */
		mdp->irq_enabled = false;
		synchronize_irq(ndev->irq);
		napi_synchronize(&mdp->napi);
		sh_eth_write(ndev, 0x0000, EESIPR);

		sh_eth_dev_exit(ndev);

		/* Free all the skbuffs in the Rx queue and the DMA buffers. */
		sh_eth_ring_free(ndev);
	}

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	if (netif_running(ndev)) {
		ret = sh_eth_ring_init(ndev);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
				   __func__);
			return ret;
		}
		ret = sh_eth_dev_init(ndev);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
				   __func__);
			return ret;
		}

		netif_device_attach(ndev);
	}

	return 0;
}
static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (mdp->cd->magic) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
	}
}

static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
		return -EOPNOTSUPP;

	mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);

	device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);

	return 0;
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_regs_len	= sh_eth_get_regs_len,
	.get_regs	= sh_eth_get_regs,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
	.get_sset_count     = sh_eth_get_sset_count,
	.get_ringparam	= sh_eth_get_ringparam,
	.set_ringparam	= sh_eth_set_ringparam,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_wol	= sh_eth_get_wol,
	.set_wol	= sh_eth_set_wol,
};
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	pm_runtime_get_sync(&mdp->pdev->dev);

	napi_enable(&mdp->napi);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		netdev_err(ndev, "Can not assign IRQ number\n");
		goto out_napi_off;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev);
	if (ret)
		goto out_free_irq;

	/* PHY control start*/
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	netif_start_queue(ndev);

	mdp->is_opened = 1;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&mdp->napi);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	netif_err(mdp, timer, ndev,
		  "transmit timed out, status %8.8x, resetting...\n",
		  sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = cpu_to_le32(0);
		rxdesc->addr = cpu_to_le32(0xBADF00D0);
		dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev);

	netif_start_queue(ndev);
}
/* Packet transmit function */
static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	dma_addr_t dma_addr;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_tx_free(ndev, true)) {
			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
	dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	txdesc->addr = cpu_to_le32(dma_addr);
	txdesc->len  = cpu_to_le32(skb->len << 16);

	dma_wmb(); /* TACT bit must be set after all the above writes */
	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_le32(TD_TACT);

	wmb(); /* cur_tx must be incremented after TACT bit was set */
	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
		sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);

	return NETDEV_TX_OK;
}
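/* The TD_TACT bit is the ownership handshake: the driver sets it to pass a
 * descriptor to the DMA engine, and sh_eth_tx_free() reclaims the slot once
 * the hardware has cleared it again.
 */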
/* The statistics registers have write-clear behaviour, which means we
 * will lose any increment between the read and write.  We mitigate
 * this by only clearing when we read a non-zero value, so we will
 * never falsely report a total of zero.
 */
static void
sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
{
	u32 delta = sh_eth_read(ndev, reg);

	if (delta) {
		*stat += delta;
		sh_eth_write(ndev, 0, reg);
	}
}
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->cd->no_tx_cntrs)
		return &ndev->stats;

	if (!mdp->is_opened)
		return &ndev->stats;

	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);

	if (mdp->cd->cexcr) {
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CERCR);
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CEECR);
	} else {
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CNDCR);
	}

	return &ndev->stats;
}
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Serialise with the interrupt handler and NAPI, then disable
	 * interrupts.  We have to clear the irq_enabled flag first to
	 * ensure that interrupts won't be re-enabled.
	 */
	mdp->irq_enabled = false;
	synchronize_irq(ndev->irq);
	napi_disable(&mdp->napi);
	sh_eth_write(ndev, 0x0000, EESIPR);

	sh_eth_dev_exit(ndev);

	/* PHY Disconnect */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
	sh_eth_ring_free(ndev);

	mdp->is_opened = 0;

	pm_runtime_put(&mdp->pdev->dev);

	return 0;
}
static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	WRITE_ONCE(ndev->mtu, new_mtu);
	netdev_update_features(ndev);

	return 0;
}
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
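/* Each TSU_POSTn register covers 8 CAM entries, 4 bits per entry, with a
 * 2-bit field per port inside that nibble.  For example, entry 5 maps to
 * bits [11:8]: the mask above is 0x0f00, and the per-port bit is 0x0800
 * for port 0 or 0x0200 for port 1.
 */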
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int reg = TSU_POST1 + entry / 8;
	u32 tmp;

	tmp = sh_eth_tsu_read(mdp, reg);
	sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
}
static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int reg = TSU_POST1 + entry / 8;
	u32 post_mask, ref_mask, tmp;

	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = sh_eth_tsu_read(mdp, reg);
	sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);

	/* If the other port still enables this entry, return "true" */
	return tmp & ref_mask;
}
static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			netdev_err(ndev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset,
				  const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, mdp->tsu_addr + offset);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, mdp->tsu_addr + offset + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}
static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	val = ioread32(mdp->tsu_addr + offset);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(mdp->tsu_addr + offset + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}
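/* The CAM (starting at TSU_ADRH0) holds SH_ETH_TSU_CAM_ENTRIES MAC
 * addresses, each stored as an 8-byte pair of registers (high and low
 * words), which is why the lookups below step reg_offset by 8 per entry.
 */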
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(ndev, reg_offset, c_addr);
		if (ether_addr_equal(addr, c_addr))
			return i;
	}

	return -ENOENT;
}
static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}
static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;

	return 0;
}
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}
static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0)
		goto done;

	/* Keep the entry if the other port still uses it */
	if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
		goto done;

	/* Disable the entry if both ports were disabled */
	ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
	if (ret < 0)
		return ret;

done:
	return 0;
}
static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	u8 addr[ETH_ALEN];
	int i;

	if (!mdp->cd->tsu)
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(ndev, reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}
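/* sh_eth_set_rx_mode() below programs ECMR together with the TSU CAM:
 * promiscuous mode sets PRM, and if a multicast CAM entry cannot be
 * added the code falls back to clearing MCT so that all multicast
 * frames are accepted instead of being silently dropped.
 */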
/* Update promiscuous flag and multicast filter */
static void sh_eth_set_rx_mode(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
	if (mdp->cd->tsu)
		ecmr_bits |= ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
static void sh_eth_set_rx_csum(struct net_device *ndev, bool enable)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);

	/* Disable TX and RX */
	sh_eth_rcv_snd_disable(ndev);

	/* Modify RX Checksum setting */
	sh_eth_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);

	/* Enable TX and RX */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
static int sh_eth_set_features(struct net_device *ndev,
			       netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum)
		sh_eth_set_rx_csum(ndev, features & NETIF_F_RXCSUM);

	ndev->features = features;

	return 0;
}
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}
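/* The TSU provides a single VLAN tag filter slot per port (TSU_VTAG0/1),
 * so hardware filtering only works while exactly one VID is registered;
 * registering a second VID disables the filter, as handled below.
 */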
static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/* The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and VLAN filtering is
	 * effectively turned off.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
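/* On single-port variants only the CAM enable and POST registers need to
 * be initialised; the dual-port controllers additionally get their
 * port-to-port forwarding, QTAG and TSU interrupt mask registers cleared
 * in the init function below.
 */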
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	if (!mdp->cd->dual_port) {
		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
				 TSU_FWSLC);	/* Enable POST registers */
		return;
	}

	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}
/* MDIO bus release function */
static int sh_mdio_release(struct sh_eth_private *mdp)
{
	/* unregister mdio bus */
	mdiobus_unregister(mdp->mii_bus);

	/* free bitbang info */
	free_mdio_bitbang(mdp->mii_bus);

	return 0;
}
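/* The mdiobb accessors below are wrapped so that every MDIO transfer is
 * bracketed by pm_runtime_get_sync()/pm_runtime_put() on the parent
 * device, keeping the controller powered while the PIR register is
 * bit-banged.
 */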
static int sh_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg)
{
	int res;

	pm_runtime_get_sync(bus->parent);
	res = mdiobb_read_c22(bus, phy, reg);
	pm_runtime_put(bus->parent);

	return res;
}

static int sh_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val)
{
	int res;

	pm_runtime_get_sync(bus->parent);
	res = mdiobb_write_c22(bus, phy, reg, val);
	pm_runtime_put(bus->parent);

	return res;
}

static int sh_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, int reg)
{
	int res;

	pm_runtime_get_sync(bus->parent);
	res = mdiobb_read_c45(bus, phy, devad, reg);
	pm_runtime_put(bus->parent);

	return res;
}

static int sh_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad,
			       int reg, u16 val)
{
	int res;

	pm_runtime_get_sync(bus->parent);
	res = mdiobb_write_c45(bus, phy, devad, reg, val);
	pm_runtime_put(bus->parent);

	return res;
}
/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
			struct sh_eth_plat_data *pd)
{
	int ret;
	struct bb_info *bitbang;
	struct platform_device *pdev = mdp->pdev;
	struct device *dev = &mdp->pdev->dev;
	struct phy_device *phydev;
	struct device_node *pn;

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang)
		return -ENOMEM;

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus)
		return -ENOMEM;

	/* Wrap accessors with Runtime PM-aware ops */
	mdp->mii_bus->read = sh_mdiobb_read_c22;
	mdp->mii_bus->write = sh_mdiobb_write_c22;
	mdp->mii_bus->read_c45 = sh_mdiobb_read_c45;
	mdp->mii_bus->write_c45 = sh_mdiobb_write_c45;

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* register MDIO bus */
	if (pd->phy_irq > 0)
		mdp->mii_bus->irq[pd->phy] = pd->phy_irq;

	ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
	if (ret)
		goto out_free_bus;

	pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
	phydev = of_phy_find_device(pn);
	if (phydev) {
		phydev->mac_managed_pm = true;
		put_device(&phydev->mdio.dev);
	}

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);
	return ret;
}
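/* Pick the per-SoC register offset table matching the register layout
 * declared in the cpu_data; the returned table translates the register
 * enum used by sh_eth_read()/sh_eth_write() into MMIO offsets.
 */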
static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	}

	return reg_offset;
}
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_change_mtu		= sh_eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_features	= sh_eth_set_features,
};

static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_change_mtu		= sh_eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_features	= sh_eth_set_features,
};
#ifdef CONFIG_OF
static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct sh_eth_plat_data *pdata;
	phy_interface_t interface;
	int ret;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	ret = of_get_phy_mode(np, &interface);
	if (ret)
		return NULL;
	pdata->phy_interface = interface;

	of_get_mac_address(np, pdata->mac_addr);

	pdata->no_ether_link =
		of_property_read_bool(np, "renesas,no-ether-link");
	pdata->ether_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	return pdata;
}
static const struct of_device_id sh_eth_match_table[] = {
	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
	{ .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
	{ .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
	{ .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
	{ .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
	{ .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
	{ .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
	{ .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
	{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
	{ .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
	{ .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
	{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
	{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_eth_match_table);
#else
static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	return NULL;
}
#endif
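/* Probe order below: map the Ether registers, obtain platform data (from
 * DT or board code), select the cpu_data and register layout, set up the
 * MAC address and optional TSU/VLAN support, then bring up the MDIO bus
 * and register the net_device with NAPI.
 */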
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);
	struct sh_eth_private *mdp;
	struct net_device *ndev;
	int ret;

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto out_release;
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	ndev->base_addr = res->start;

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;

	if (pdev->dev.of_node)
		pd = sh_eth_parse_dt(&pdev->dev);
	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	if (id)
		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	else
		mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);

	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	if (!mdp->reg_offset) {
		dev_err(&pdev->dev, "Unknown register type (%d)\n",
			mdp->cd->register_type);
		ret = -EINVAL;
		goto out_release;
	}
	sh_eth_set_default_cpu_data(mdp->cd);

	/* User's manual states max MTU should be 2048 but due to the
	 * alignment calculations in sh_eth_ring_init() the practical
	 * MTU is a bit less. Maybe this can be optimized some more.
	 */
	ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	ndev->min_mtu = ETH_MIN_MTU;

	if (mdp->cd->rx_csum) {
		ndev->features = NETIF_F_RXCSUM;
		ndev->hw_features = NETIF_F_RXCSUM;
	}

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->ethtool_ops = &sh_eth_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	if (mdp->cd->tsu) {
		int port = pdev->id < 0 ? 0 : pdev->id % 2;
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "no TSU resource\n");
			ret = -ENODEV;
			goto out_release;
		}
		/* We can only request the TSU region for the first port
		 * of the two sharing this TSU for the probe to succeed...
		 */
		if (port == 0 &&
		    !devm_request_mem_region(&pdev->dev, rtsu->start,
					     resource_size(rtsu),
					     dev_name(&pdev->dev))) {
			dev_err(&pdev->dev, "can't request TSU resource.\n");
			ret = -EBUSY;
			goto out_release;
		}
		/* ioremap the TSU registers */
		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
					     resource_size(rtsu));
		if (!mdp->tsu_addr) {
			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
			ret = -ENOMEM;
			goto out_release;
		}
		mdp->port = port;
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		/* Need to init only the first port of the two sharing a TSU */
		if (port == 0) {
			if (mdp->cd->chip_reset)
				mdp->cd->chip_reset(ndev);

			/* TSU init (Init only)*/
			sh_eth_tsu_init(mdp);
		}
	}

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* MDIO bus init */
	ret = sh_mdio_init(mdp, pd);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "MDIO init failed\n");
		goto out_release;
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	if (mdp->cd->magic)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* print device information */
	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, ndev);

	return ret;

out_napi_del:
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);

out_release:
	/* net_dev free */
	free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return ret;
}
static void sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
}
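/* Wake-on-LAN: when WoL is enabled, suspend leaves the controller running
 * with only the ECI interrupt unmasked and MagicPacket detection
 * (ECMR_MPDE) turned on, instead of closing the device.
 */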
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int sh_eth_wol_setup(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* Only allow ECI interrupts */
	synchronize_irq(ndev->irq);
	napi_disable(&mdp->napi);
	sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);

	/* Enable MagicPacket */
	sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);

	return enable_irq_wake(ndev->irq);
}
static int sh_eth_wol_restore(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	napi_enable(&mdp->napi);

	/* Disable MagicPacket */
	sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);

	/* The device needs to be reset to restore MagicPacket logic
	 * for next wakeup. If we close and open the device it will
	 * both be reset and all registers restored. This is what
	 * happens during suspend and resume without WoL enabled.
	 */
	sh_eth_close(ndev);
	ret = sh_eth_open(ndev);
	if (ret < 0)
		return ret;

	return disable_irq_wake(ndev->irq);
}
static int sh_eth_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	if (mdp->wol_enabled)
		ret = sh_eth_wol_setup(ndev);
	else
		ret = sh_eth_close(ndev);

	return ret;
}
static int sh_eth_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	if (mdp->wol_enabled)
		ret = sh_eth_wol_restore(ndev);
	else
		ret = sh_eth_open(ndev);

	if (ret < 0)
		return ret;

	netif_device_attach(ndev);

	return ret;
}
#endif
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif
static const struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		   .name = CARDNAME,
		   .pm = SH_ETH_PM_OPS,
		   .of_match_table = of_match_ptr(sh_eth_match_table),
	},
};

module_platform_driver(sh_eth_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");