1 // SPDX-License-Identifier: GPL-2.0+
3 #include <linux/types.h>
5 #include <linux/platform_device.h>
6 #include <linux/pm_runtime.h>
7 #include <linux/acpi.h>
8 #include <linux/of_mdio.h>
9 #include <linux/etherdevice.h>
10 #include <linux/interrupt.h>
14 #include <net/ip6_checksum.h>
16 #define NETSEC_REG_SOFT_RST 0x104
17 #define NETSEC_REG_COM_INIT 0x120
19 #define NETSEC_REG_TOP_STATUS 0x200
20 #define NETSEC_IRQ_RX BIT(1)
21 #define NETSEC_IRQ_TX BIT(0)
23 #define NETSEC_REG_TOP_INTEN 0x204
24 #define NETSEC_REG_INTEN_SET 0x234
25 #define NETSEC_REG_INTEN_CLR 0x238
27 #define NETSEC_REG_NRM_TX_STATUS 0x400
28 #define NETSEC_REG_NRM_TX_INTEN 0x404
29 #define NETSEC_REG_NRM_TX_INTEN_SET 0x428
30 #define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
31 #define NRM_TX_ST_NTOWNR BIT(17)
32 #define NRM_TX_ST_TR_ERR BIT(16)
33 #define NRM_TX_ST_TXDONE BIT(15)
34 #define NRM_TX_ST_TMREXP BIT(14)
36 #define NETSEC_REG_NRM_RX_STATUS 0x440
37 #define NETSEC_REG_NRM_RX_INTEN 0x444
38 #define NETSEC_REG_NRM_RX_INTEN_SET 0x468
39 #define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
40 #define NRM_RX_ST_RC_ERR BIT(16)
41 #define NRM_RX_ST_PKTCNT BIT(15)
42 #define NRM_RX_ST_TMREXP BIT(14)
44 #define NETSEC_REG_PKT_CMD_BUF 0xd0
46 #define NETSEC_REG_CLK_EN 0x100
48 #define NETSEC_REG_PKT_CTRL 0x140
50 #define NETSEC_REG_DMA_TMR_CTRL 0x20c
51 #define NETSEC_REG_F_TAIKI_MC_VER 0x22c
52 #define NETSEC_REG_F_TAIKI_VER 0x230
53 #define NETSEC_REG_DMA_HM_CTRL 0x214
54 #define NETSEC_REG_DMA_MH_CTRL 0x220
55 #define NETSEC_REG_ADDR_DIS_CORE 0x218
56 #define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
57 #define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c
59 #define NETSEC_REG_NRM_TX_PKTCNT 0x410
61 #define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
62 #define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418
64 #define NETSEC_REG_NRM_TX_TMR 0x41c
66 #define NETSEC_REG_NRM_RX_PKTCNT 0x454
67 #define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
68 #define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
69 #define NETSEC_REG_NRM_RX_RXINT_TMR 0x460
71 #define NETSEC_REG_NRM_RX_TMR 0x45c
73 #define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
74 #define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
75 #define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
76 #define NETSEC_REG_NRM_RX_DESC_START_LW 0x448
78 #define NETSEC_REG_NRM_TX_CONFIG 0x430
79 #define NETSEC_REG_NRM_RX_CONFIG 0x470
81 #define MAC_REG_STATUS 0x1024
82 #define MAC_REG_DATA 0x11c0
83 #define MAC_REG_CMD 0x11c4
84 #define MAC_REG_FLOW_TH 0x11cc
85 #define MAC_REG_INTF_SEL 0x11d4
86 #define MAC_REG_DESC_INIT 0x11fc
87 #define MAC_REG_DESC_SOFT_RST 0x1204
88 #define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500
90 #define GMAC_REG_MCR 0x0000
91 #define GMAC_REG_MFFR 0x0004
92 #define GMAC_REG_GAR 0x0010
93 #define GMAC_REG_GDR 0x0014
94 #define GMAC_REG_FCR 0x0018
95 #define GMAC_REG_BMR 0x1000
96 #define GMAC_REG_RDLAR 0x100c
97 #define GMAC_REG_TDLAR 0x1010
98 #define GMAC_REG_OMR 0x1018
100 #define MHZ(n) ((n) * 1000 * 1000)
102 #define NETSEC_TX_SHIFT_OWN_FIELD 31
103 #define NETSEC_TX_SHIFT_LD_FIELD 30
104 #define NETSEC_TX_SHIFT_DRID_FIELD 24
105 #define NETSEC_TX_SHIFT_PT_FIELD 21
106 #define NETSEC_TX_SHIFT_TDRID_FIELD 16
107 #define NETSEC_TX_SHIFT_CC_FIELD 15
108 #define NETSEC_TX_SHIFT_FS_FIELD 9
109 #define NETSEC_TX_LAST 8
110 #define NETSEC_TX_SHIFT_CO 7
111 #define NETSEC_TX_SHIFT_SO 6
112 #define NETSEC_TX_SHIFT_TRS_FIELD 4
114 #define NETSEC_RX_PKT_OWN_FIELD 31
115 #define NETSEC_RX_PKT_LD_FIELD 30
116 #define NETSEC_RX_PKT_SDRID_FIELD 24
117 #define NETSEC_RX_PKT_FR_FIELD 23
118 #define NETSEC_RX_PKT_ER_FIELD 21
119 #define NETSEC_RX_PKT_ERR_FIELD 16
120 #define NETSEC_RX_PKT_TDRID_FIELD 12
121 #define NETSEC_RX_PKT_FS_FIELD 9
122 #define NETSEC_RX_PKT_LS_FIELD 8
123 #define NETSEC_RX_PKT_CO_FIELD 6
125 #define NETSEC_RX_PKT_ERR_MASK 3
127 #define NETSEC_MAX_TX_PKT_LEN 1518
128 #define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018
130 #define NETSEC_RING_GMAC 15
131 #define NETSEC_RING_MAX 2
133 #define NETSEC_TCP_SEG_LEN_MAX 1460
134 #define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960
136 #define NETSEC_RX_CKSUM_NOTAVAIL 0
137 #define NETSEC_RX_CKSUM_OK 1
138 #define NETSEC_RX_CKSUM_NG 2
140 #define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
141 #define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)
143 #define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
144 #define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)
146 #define NETSEC_INT_PKTCNT_MAX 2047
148 #define NETSEC_FLOW_START_TH_MAX 95
149 #define NETSEC_FLOW_STOP_TH_MAX 95
150 #define NETSEC_FLOW_PAUSE_TIME_MIN 5
152 #define NETSEC_CLK_EN_REG_DOM_ALL 0x3f
154 #define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
155 #define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
156 #define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
157 #define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
158 #define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
159 #define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)
161 #define NETSEC_CLK_EN_REG_DOM_G BIT(5)
162 #define NETSEC_CLK_EN_REG_DOM_C BIT(1)
163 #define NETSEC_CLK_EN_REG_DOM_D BIT(0)
165 #define NETSEC_COM_INIT_REG_DB BIT(2)
166 #define NETSEC_COM_INIT_REG_CLS BIT(1)
167 #define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
168 NETSEC_COM_INIT_REG_DB)
170 #define NETSEC_SOFT_RST_REG_RESET 0
171 #define NETSEC_SOFT_RST_REG_RUN BIT(31)
173 #define NETSEC_DMA_CTRL_REG_STOP 1
174 #define MH_CTRL__MODE_TRANS BIT(20)
176 #define NETSEC_GMAC_CMD_ST_READ 0
177 #define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
178 #define NETSEC_GMAC_CMD_ST_BUSY BIT(31)
180 #define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
181 #define NETSEC_GMAC_BMR_REG_RESET 0x00020181
182 #define NETSEC_GMAC_BMR_REG_SWR 0x00000001
184 #define NETSEC_GMAC_OMR_REG_ST BIT(13)
185 #define NETSEC_GMAC_OMR_REG_SR BIT(1)
187 #define NETSEC_GMAC_MCR_REG_IBN BIT(30)
188 #define NETSEC_GMAC_MCR_REG_CST BIT(25)
189 #define NETSEC_GMAC_MCR_REG_JE BIT(20)
190 #define NETSEC_MCR_PS BIT(15)
191 #define NETSEC_GMAC_MCR_REG_FES BIT(14)
192 #define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
193 #define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c
195 #define NETSEC_FCR_RFE BIT(2)
196 #define NETSEC_FCR_TFE BIT(1)
198 #define NETSEC_GMAC_GAR_REG_GW BIT(1)
199 #define NETSEC_GMAC_GAR_REG_GB BIT(0)
201 #define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
202 #define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
203 #define GMAC_REG_SHIFT_CR_GAR 2
205 #define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
206 #define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
207 #define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
208 #define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
209 #define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
210 #define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5
212 #define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
213 #define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000
215 #define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000
217 #define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
218 #define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
219 #define NETSEC_REG_DESC_TMR_MODE 4
220 #define NETSEC_REG_DESC_ENDIAN 0
222 #define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
223 #define NETSEC_MAC_DESC_INIT_REG_INIT 1
225 #define NETSEC_EEPROM_MAC_ADDRESS 0x00
226 #define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
227 #define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
228 #define NETSEC_EEPROM_HM_ME_SIZE 0x10
229 #define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
230 #define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
231 #define NETSEC_EEPROM_MH_ME_SIZE 0x1C
232 #define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
233 #define NETSEC_EEPROM_PKT_ME_SIZE 0x24
237 #define DESC_SZ sizeof(struct netsec_de)
239 #define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
253 struct netsec_desc_ring
{
255 struct netsec_desc
*desc
;
262 struct netsec_desc_ring desc_ring
[NETSEC_RING_MAX
];
263 struct ethtool_coalesce et_coalesce
;
264 spinlock_t reglock
; /* protect reg access */
265 struct napi_struct napi
;
266 phy_interface_t phy_interface
;
267 struct net_device
*ndev
;
268 struct device_node
*phy_np
;
269 struct phy_device
*phydev
;
270 struct mii_bus
*mii_bus
;
271 void __iomem
*ioaddr
;
272 void __iomem
*eeprom_base
;
278 bool rx_cksum_offload_flag
;
281 struct netsec_de
{ /* Netsec Descriptor layout */
283 u32 data_buf_addr_up
;
284 u32 data_buf_addr_lw
;
288 struct netsec_tx_pkt_ctrl
{
290 bool tcp_seg_offload_flag
;
291 bool cksum_offload_flag
;
294 struct netsec_rx_pkt_info
{
300 static void netsec_write(struct netsec_priv
*priv
, u32 reg_addr
, u32 val
)
302 writel(val
, priv
->ioaddr
+ reg_addr
);
305 static u32
netsec_read(struct netsec_priv
*priv
, u32 reg_addr
)
307 return readl(priv
->ioaddr
+ reg_addr
);
310 /************* MDIO BUS OPS FOLLOW *************/
312 #define TIMEOUT_SPINS_MAC 1000
313 #define TIMEOUT_SECONDARY_MS_MAC 100
/* Translate the GMAC reference-clock frequency into the MDIO clock-range
 * (CR) field value programmed into the GMAC GAR register via
 * GMAC_REG_SHIFT_CR_GAR.
 *
 * NOTE(review): the selecting `if` conditions between the return
 * statements below (original lines 316-327) were lost in extraction;
 * presumably each compares `freq` against an MHZ() threshold matching
 * the constant names (35/60/100/150/250 MHz bands) - confirm against
 * the pristine source before relying on this listing.
 */
315 static u32
netsec_clk_type(u32 freq
)
318 return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ
;
320 return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ
;
322 return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ
;
324 return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ
;
326 return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ
;
/* Fallthrough value for the highest supported clock band. */
328 return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ
;
/* Poll the register at @addr until the bits in @mask clear.
 *
 * Two phases are visible below: a tight spin of TIMEOUT_SPINS_MAC
 * iterations, then a slower phase of TIMEOUT_SECONDARY_MS_MAC
 * iterations sleeping 1-2 ms per attempt, ending in a netdev_WARN on
 * timeout.
 *
 * NOTE(review): the spin-phase loop body, the early-success returns and
 * the final error return (original lines 336-349) were lost in
 * extraction - confirm against the pristine source.
 */
331 static int netsec_wait_while_busy(struct netsec_priv
*priv
, u32 addr
, u32 mask
)
333 u32 timeout
= TIMEOUT_SPINS_MAC
;
/* Phase 1: busy-spin while the masked bits are still set. */
335 while (--timeout
&& netsec_read(priv
, addr
) & mask
)
/* Phase 2: re-arm the counter and poll with sleeps between reads. */
340 timeout
= TIMEOUT_SECONDARY_MS_MAC
;
341 while (--timeout
&& netsec_read(priv
, addr
) & mask
)
342 usleep_range(1000, 2000);
/* Both phases exhausted: report the stuck register. */
347 netdev_WARN(priv
->ndev
, "%s: timeout\n", __func__
);
352 static int netsec_mac_write(struct netsec_priv
*priv
, u32 addr
, u32 value
)
354 netsec_write(priv
, MAC_REG_DATA
, value
);
355 netsec_write(priv
, MAC_REG_CMD
, addr
| NETSEC_GMAC_CMD_ST_WRITE
);
356 return netsec_wait_while_busy(priv
,
357 MAC_REG_CMD
, NETSEC_GMAC_CMD_ST_BUSY
);
/* Indirect read of a MAC-block register: post @addr with the read
 * strobe to MAC_REG_CMD, wait for the BUSY bit to clear, then fetch
 * the result from MAC_REG_DATA into *@read.
 *
 * NOTE(review): the declaration of `ret`, the `if (ret)` error-return
 * and the final `return 0;` (original lines 361-372) were lost in
 * extraction - confirm against the pristine source.
 */
360 static int netsec_mac_read(struct netsec_priv
*priv
, u32 addr
, u32
*read
)
/* Kick off the indirect read transaction. */
364 netsec_write(priv
, MAC_REG_CMD
, addr
| NETSEC_GMAC_CMD_ST_READ
);
365 ret
= netsec_wait_while_busy(priv
,
366 MAC_REG_CMD
, NETSEC_GMAC_CMD_ST_BUSY
);
/* Transaction done: the requested value is now latched in MAC_REG_DATA. */
370 *read
= netsec_read(priv
, MAC_REG_DATA
);
375 static int netsec_mac_wait_while_busy(struct netsec_priv
*priv
,
378 u32 timeout
= TIMEOUT_SPINS_MAC
;
382 ret
= netsec_mac_read(priv
, addr
, &data
);
386 } while (--timeout
&& (data
& mask
));
391 timeout
= TIMEOUT_SECONDARY_MS_MAC
;
393 usleep_range(1000, 2000);
395 ret
= netsec_mac_read(priv
, addr
, &data
);
399 } while (--timeout
&& (data
& mask
));
404 netdev_WARN(priv
->ndev
, "%s: timeout\n", __func__
);
/* Program the GMAC MCR register to match the current PHY link state:
 * duplex, speed (port-select / fast-ethernet bits), CRC stripping and
 * jumbo enable, plus in-band status for RGMII interfaces.
 *
 * NOTE(review): the declaration of `value` and the function's return
 * paths (original lines 410-433) were lost in extraction - confirm
 * against the pristine source.
 */
409 static int netsec_mac_update_to_phy_state(struct netsec_priv
*priv
)
411 struct phy_device
*phydev
= priv
->ndev
->phydev
;
/* Base config chosen by the negotiated duplex mode. */
414 value
= phydev
->duplex
? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON
:
415 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON
;
/* Port-select (MII mode) for anything below gigabit. */
417 if (phydev
->speed
!= SPEED_1000
)
418 value
|= NETSEC_MCR_PS
;
/* Fast-ethernet speed bit for 100 Mb/s on non-GMII interfaces. */
420 if (priv
->phy_interface
!= PHY_INTERFACE_MODE_GMII
&&
421 phydev
->speed
== SPEED_100
)
422 value
|= NETSEC_GMAC_MCR_REG_FES
;
424 value
|= NETSEC_GMAC_MCR_REG_CST
| NETSEC_GMAC_MCR_REG_JE
;
/* RGMII carries link status in-band. */
426 if (phy_interface_mode_is_rgmii(priv
->phy_interface
))
427 value
|= NETSEC_GMAC_MCR_REG_IBN
;
/* Commit the assembled config to the MAC control register. */
429 if (netsec_mac_write(priv
, GMAC_REG_MCR
, value
))
435 static int netsec_phy_read(struct mii_bus
*bus
, int phy_addr
, int reg_addr
);
437 static int netsec_phy_write(struct mii_bus
*bus
,
438 int phy_addr
, int reg
, u16 val
)
441 struct netsec_priv
*priv
= bus
->priv
;
443 if (netsec_mac_write(priv
, GMAC_REG_GDR
, val
))
445 if (netsec_mac_write(priv
, GMAC_REG_GAR
,
446 phy_addr
<< NETSEC_GMAC_GAR_REG_SHIFT_PA
|
447 reg
<< NETSEC_GMAC_GAR_REG_SHIFT_GR
|
448 NETSEC_GMAC_GAR_REG_GW
| NETSEC_GMAC_GAR_REG_GB
|
449 (netsec_clk_type(priv
->freq
) <<
450 GMAC_REG_SHIFT_CR_GAR
)))
453 status
= netsec_mac_wait_while_busy(priv
, GMAC_REG_GAR
,
454 NETSEC_GMAC_GAR_REG_GB
);
456 /* Developerbox implements RTL8211E PHY and there is
457 * a compatibility problem with F_GMAC4.
458 * RTL8211E expects MDC clock must be kept toggling for several
459 * clock cycle with MDIO high before entering the IDLE state.
460 * To meet this requirement, netsec driver needs to issue dummy
461 * read(e.g. read PHYID1(offset 0x2) register) right after write.
463 netsec_phy_read(bus
, phy_addr
, MII_PHYSID1
);
468 static int netsec_phy_read(struct mii_bus
*bus
, int phy_addr
, int reg_addr
)
470 struct netsec_priv
*priv
= bus
->priv
;
474 if (netsec_mac_write(priv
, GMAC_REG_GAR
, NETSEC_GMAC_GAR_REG_GB
|
475 phy_addr
<< NETSEC_GMAC_GAR_REG_SHIFT_PA
|
476 reg_addr
<< NETSEC_GMAC_GAR_REG_SHIFT_GR
|
477 (netsec_clk_type(priv
->freq
) <<
478 GMAC_REG_SHIFT_CR_GAR
)))
481 ret
= netsec_mac_wait_while_busy(priv
, GMAC_REG_GAR
,
482 NETSEC_GMAC_GAR_REG_GB
);
486 ret
= netsec_mac_read(priv
, GMAC_REG_GDR
, &data
);
493 /************* ETHTOOL_OPS FOLLOW *************/
495 static void netsec_et_get_drvinfo(struct net_device
*net_device
,
496 struct ethtool_drvinfo
*info
)
498 strlcpy(info
->driver
, "netsec", sizeof(info
->driver
));
499 strlcpy(info
->bus_info
, dev_name(net_device
->dev
.parent
),
500 sizeof(info
->bus_info
));
503 static int netsec_et_get_coalesce(struct net_device
*net_device
,
504 struct ethtool_coalesce
*et_coalesce
)
506 struct netsec_priv
*priv
= netdev_priv(net_device
);
508 *et_coalesce
= priv
->et_coalesce
;
513 static int netsec_et_set_coalesce(struct net_device
*net_device
,
514 struct ethtool_coalesce
*et_coalesce
)
516 struct netsec_priv
*priv
= netdev_priv(net_device
);
518 priv
->et_coalesce
= *et_coalesce
;
520 if (priv
->et_coalesce
.tx_coalesce_usecs
< 50)
521 priv
->et_coalesce
.tx_coalesce_usecs
= 50;
522 if (priv
->et_coalesce
.tx_max_coalesced_frames
< 1)
523 priv
->et_coalesce
.tx_max_coalesced_frames
= 1;
525 netsec_write(priv
, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT
,
526 priv
->et_coalesce
.tx_max_coalesced_frames
);
527 netsec_write(priv
, NETSEC_REG_NRM_TX_TXINT_TMR
,
528 priv
->et_coalesce
.tx_coalesce_usecs
);
529 netsec_write(priv
, NETSEC_REG_NRM_TX_INTEN_SET
, NRM_TX_ST_TXDONE
);
530 netsec_write(priv
, NETSEC_REG_NRM_TX_INTEN_SET
, NRM_TX_ST_TMREXP
);
532 if (priv
->et_coalesce
.rx_coalesce_usecs
< 50)
533 priv
->et_coalesce
.rx_coalesce_usecs
= 50;
534 if (priv
->et_coalesce
.rx_max_coalesced_frames
< 1)
535 priv
->et_coalesce
.rx_max_coalesced_frames
= 1;
537 netsec_write(priv
, NETSEC_REG_NRM_RX_RXINT_PKTCNT
,
538 priv
->et_coalesce
.rx_max_coalesced_frames
);
539 netsec_write(priv
, NETSEC_REG_NRM_RX_RXINT_TMR
,
540 priv
->et_coalesce
.rx_coalesce_usecs
);
541 netsec_write(priv
, NETSEC_REG_NRM_RX_INTEN_SET
, NRM_RX_ST_PKTCNT
);
542 netsec_write(priv
, NETSEC_REG_NRM_RX_INTEN_SET
, NRM_RX_ST_TMREXP
);
547 static u32
netsec_et_get_msglevel(struct net_device
*dev
)
549 struct netsec_priv
*priv
= netdev_priv(dev
);
551 return priv
->msg_enable
;
554 static void netsec_et_set_msglevel(struct net_device
*dev
, u32 datum
)
556 struct netsec_priv
*priv
= netdev_priv(dev
);
558 priv
->msg_enable
= datum
;
/* ethtool operations table.  Link-settings and link-state queries are
 * delegated to the generic phylib/ethtool helpers; drvinfo, coalescing
 * and message-level ops are the driver-local handlers defined above.
 *
 * NOTE(review): the closing `};` (original line 570) was lost in
 * extraction.
 */
561 static const struct ethtool_ops netsec_ethtool_ops
= {
562 .get_drvinfo
= netsec_et_get_drvinfo
,
563 .get_link_ksettings
= phy_ethtool_get_link_ksettings
,
564 .set_link_ksettings
= phy_ethtool_set_link_ksettings
,
565 .get_link
= ethtool_op_get_link
,
566 .get_coalesce
= netsec_et_get_coalesce
,
567 .set_coalesce
= netsec_et_set_coalesce
,
568 .get_msglevel
= netsec_et_get_msglevel
,
569 .set_msglevel
= netsec_et_set_msglevel
,
572 /************* NETDEV_OPS FOLLOW *************/
574 static struct sk_buff
*netsec_alloc_skb(struct netsec_priv
*priv
,
575 struct netsec_desc
*desc
)
579 if (device_get_dma_attr(priv
->dev
) == DEV_DMA_COHERENT
) {
580 skb
= netdev_alloc_skb_ip_align(priv
->ndev
, desc
->len
);
582 desc
->len
= L1_CACHE_ALIGN(desc
->len
);
583 skb
= netdev_alloc_skb(priv
->ndev
, desc
->len
);
588 desc
->addr
= skb
->data
;
589 desc
->dma_addr
= dma_map_single(priv
->dev
, desc
->addr
, desc
->len
,
591 if (dma_mapping_error(priv
->dev
, desc
->dma_addr
)) {
592 dev_kfree_skb_any(skb
);
598 static void netsec_set_rx_de(struct netsec_priv
*priv
,
599 struct netsec_desc_ring
*dring
, u16 idx
,
600 const struct netsec_desc
*desc
,
603 struct netsec_de
*de
= dring
->vaddr
+ DESC_SZ
* idx
;
604 u32 attr
= (1 << NETSEC_RX_PKT_OWN_FIELD
) |
605 (1 << NETSEC_RX_PKT_FS_FIELD
) |
606 (1 << NETSEC_RX_PKT_LS_FIELD
);
608 if (idx
== DESC_NUM
- 1)
609 attr
|= (1 << NETSEC_RX_PKT_LD_FIELD
);
611 de
->data_buf_addr_up
= upper_32_bits(desc
->dma_addr
);
612 de
->data_buf_addr_lw
= lower_32_bits(desc
->dma_addr
);
613 de
->buf_len_info
= desc
->len
;
617 dring
->desc
[idx
].dma_addr
= desc
->dma_addr
;
618 dring
->desc
[idx
].addr
= desc
->addr
;
619 dring
->desc
[idx
].len
= desc
->len
;
620 dring
->desc
[idx
].skb
= skb
;
623 static struct sk_buff
*netsec_get_rx_de(struct netsec_priv
*priv
,
624 struct netsec_desc_ring
*dring
,
626 struct netsec_rx_pkt_info
*rxpi
,
627 struct netsec_desc
*desc
, u16
*len
)
629 struct netsec_de de
= {};
631 memcpy(&de
, dring
->vaddr
+ DESC_SZ
* idx
, DESC_SZ
);
633 *len
= de
.buf_len_info
>> 16;
635 rxpi
->err_flag
= (de
.attr
>> NETSEC_RX_PKT_ER_FIELD
) & 1;
636 rxpi
->rx_cksum_result
= (de
.attr
>> NETSEC_RX_PKT_CO_FIELD
) & 3;
637 rxpi
->err_code
= (de
.attr
>> NETSEC_RX_PKT_ERR_FIELD
) &
638 NETSEC_RX_PKT_ERR_MASK
;
639 *desc
= dring
->desc
[idx
];
643 static struct sk_buff
*netsec_get_rx_pkt_data(struct netsec_priv
*priv
,
644 struct netsec_rx_pkt_info
*rxpi
,
645 struct netsec_desc
*desc
,
648 struct netsec_desc_ring
*dring
= &priv
->desc_ring
[NETSEC_RING_RX
];
649 struct sk_buff
*tmp_skb
, *skb
= NULL
;
650 struct netsec_desc td
;
653 *rxpi
= (struct netsec_rx_pkt_info
){};
655 td
.len
= priv
->ndev
->mtu
+ 22;
657 tmp_skb
= netsec_alloc_skb(priv
, &td
);
662 netsec_set_rx_de(priv
, dring
, tail
, &dring
->desc
[tail
],
663 dring
->desc
[tail
].skb
);
665 skb
= netsec_get_rx_de(priv
, dring
, tail
, rxpi
, desc
, len
);
666 netsec_set_rx_de(priv
, dring
, tail
, &td
, tmp_skb
);
669 /* move tail ahead */
670 dring
->tail
= (dring
->tail
+ 1) % DESC_NUM
;
675 static int netsec_clean_tx_dring(struct netsec_priv
*priv
, int budget
)
677 struct netsec_desc_ring
*dring
= &priv
->desc_ring
[NETSEC_RING_TX
];
678 unsigned int pkts
, bytes
;
680 dring
->pkt_cnt
+= netsec_read(priv
, NETSEC_REG_NRM_TX_DONE_PKTCNT
);
682 if (dring
->pkt_cnt
< budget
)
683 budget
= dring
->pkt_cnt
;
688 while (pkts
< budget
) {
689 struct netsec_desc
*desc
;
690 struct netsec_de
*entry
;
695 /* move tail ahead */
696 dring
->tail
= (tail
+ 1) % DESC_NUM
;
698 desc
= &dring
->desc
[tail
];
699 entry
= dring
->vaddr
+ DESC_SZ
* tail
;
701 eop
= (entry
->attr
>> NETSEC_TX_LAST
) & 1;
703 dma_unmap_single(priv
->dev
, desc
->dma_addr
, desc
->len
,
707 bytes
+= desc
->skb
->len
;
708 dev_kfree_skb(desc
->skb
);
710 *desc
= (struct netsec_desc
){};
712 dring
->pkt_cnt
-= budget
;
714 priv
->ndev
->stats
.tx_packets
+= budget
;
715 priv
->ndev
->stats
.tx_bytes
+= bytes
;
717 netdev_completed_queue(priv
->ndev
, budget
, bytes
);
722 static int netsec_process_tx(struct netsec_priv
*priv
, int budget
)
724 struct net_device
*ndev
= priv
->ndev
;
728 new = netsec_clean_tx_dring(priv
, budget
);
733 if (done
&& netif_queue_stopped(ndev
))
734 netif_wake_queue(ndev
);
739 static int netsec_process_rx(struct netsec_priv
*priv
, int budget
)
741 struct netsec_desc_ring
*dring
= &priv
->desc_ring
[NETSEC_RING_RX
];
742 struct net_device
*ndev
= priv
->ndev
;
743 struct netsec_rx_pkt_info rx_info
;
745 struct netsec_desc desc
;
749 while (done
< budget
) {
750 u16 idx
= dring
->tail
;
751 struct netsec_de
*de
= dring
->vaddr
+ (DESC_SZ
* idx
);
753 if (de
->attr
& (1U << NETSEC_RX_PKT_OWN_FIELD
)) {
754 /* reading the register clears the irq */
755 netsec_read(priv
, NETSEC_REG_NRM_RX_PKTCNT
);
759 /* This barrier is needed to keep us from reading
760 * any other fields out of the netsec_de until we have
761 * verified the descriptor has been written back
765 skb
= netsec_get_rx_pkt_data(priv
, &rx_info
, &desc
, &len
);
766 if (unlikely(!skb
) || rx_info
.err_flag
) {
767 netif_err(priv
, drv
, priv
->ndev
,
768 "%s: rx fail err(%d)\n",
769 __func__
, rx_info
.err_code
);
770 ndev
->stats
.rx_dropped
++;
774 dma_unmap_single(priv
->dev
, desc
.dma_addr
, desc
.len
,
777 skb
->protocol
= eth_type_trans(skb
, priv
->ndev
);
779 if (priv
->rx_cksum_offload_flag
&&
780 rx_info
.rx_cksum_result
== NETSEC_RX_CKSUM_OK
)
781 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
783 if (napi_gro_receive(&priv
->napi
, skb
) != GRO_DROP
) {
784 ndev
->stats
.rx_packets
++;
785 ndev
->stats
.rx_bytes
+= len
;
792 static int netsec_napi_poll(struct napi_struct
*napi
, int budget
)
794 struct netsec_priv
*priv
;
795 int tx
, rx
, done
, todo
;
797 priv
= container_of(napi
, struct netsec_priv
, napi
);
804 tx
= netsec_process_tx(priv
, todo
);
810 rx
= netsec_process_rx(priv
, todo
);
814 done
= budget
- todo
;
816 if (done
< budget
&& napi_complete_done(napi
, done
)) {
819 spin_lock_irqsave(&priv
->reglock
, flags
);
820 netsec_write(priv
, NETSEC_REG_INTEN_SET
,
821 NETSEC_IRQ_RX
| NETSEC_IRQ_TX
);
822 spin_unlock_irqrestore(&priv
->reglock
, flags
);
828 static void netsec_set_tx_de(struct netsec_priv
*priv
,
829 struct netsec_desc_ring
*dring
,
830 const struct netsec_tx_pkt_ctrl
*tx_ctrl
,
831 const struct netsec_desc
*desc
,
834 int idx
= dring
->head
;
835 struct netsec_de
*de
;
838 de
= dring
->vaddr
+ (DESC_SZ
* idx
);
840 attr
= (1 << NETSEC_TX_SHIFT_OWN_FIELD
) |
841 (1 << NETSEC_TX_SHIFT_PT_FIELD
) |
842 (NETSEC_RING_GMAC
<< NETSEC_TX_SHIFT_TDRID_FIELD
) |
843 (1 << NETSEC_TX_SHIFT_FS_FIELD
) |
844 (1 << NETSEC_TX_LAST
) |
845 (tx_ctrl
->cksum_offload_flag
<< NETSEC_TX_SHIFT_CO
) |
846 (tx_ctrl
->tcp_seg_offload_flag
<< NETSEC_TX_SHIFT_SO
) |
847 (1 << NETSEC_TX_SHIFT_TRS_FIELD
);
848 if (idx
== DESC_NUM
- 1)
849 attr
|= (1 << NETSEC_TX_SHIFT_LD_FIELD
);
851 de
->data_buf_addr_up
= upper_32_bits(desc
->dma_addr
);
852 de
->data_buf_addr_lw
= lower_32_bits(desc
->dma_addr
);
853 de
->buf_len_info
= (tx_ctrl
->tcp_seg_len
<< 16) | desc
->len
;
857 dring
->desc
[idx
] = *desc
;
858 dring
->desc
[idx
].skb
= skb
;
860 /* move head ahead */
861 dring
->head
= (dring
->head
+ 1) % DESC_NUM
;
864 static netdev_tx_t
netsec_netdev_start_xmit(struct sk_buff
*skb
,
865 struct net_device
*ndev
)
867 struct netsec_priv
*priv
= netdev_priv(ndev
);
868 struct netsec_desc_ring
*dring
= &priv
->desc_ring
[NETSEC_RING_TX
];
869 struct netsec_tx_pkt_ctrl tx_ctrl
= {};
870 struct netsec_desc tx_desc
;
874 /* differentiate between full/emtpy ring */
875 if (dring
->head
>= dring
->tail
)
876 filled
= dring
->head
- dring
->tail
;
878 filled
= dring
->head
+ DESC_NUM
- dring
->tail
;
880 if (DESC_NUM
- filled
< 2) { /* if less than 2 available */
881 netif_err(priv
, drv
, priv
->ndev
, "%s: TxQFull!\n", __func__
);
882 netif_stop_queue(priv
->ndev
);
884 return NETDEV_TX_BUSY
;
887 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
888 tx_ctrl
.cksum_offload_flag
= true;
891 tso_seg_len
= skb_shinfo(skb
)->gso_size
;
893 if (tso_seg_len
> 0) {
894 if (skb
->protocol
== htons(ETH_P_IP
)) {
895 ip_hdr(skb
)->tot_len
= 0;
896 tcp_hdr(skb
)->check
=
897 ~tcp_v4_check(0, ip_hdr(skb
)->saddr
,
898 ip_hdr(skb
)->daddr
, 0);
900 ipv6_hdr(skb
)->payload_len
= 0;
901 tcp_hdr(skb
)->check
=
902 ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
903 &ipv6_hdr(skb
)->daddr
,
907 tx_ctrl
.tcp_seg_offload_flag
= true;
908 tx_ctrl
.tcp_seg_len
= tso_seg_len
;
911 tx_desc
.dma_addr
= dma_map_single(priv
->dev
, skb
->data
,
912 skb_headlen(skb
), DMA_TO_DEVICE
);
913 if (dma_mapping_error(priv
->dev
, tx_desc
.dma_addr
)) {
914 netif_err(priv
, drv
, priv
->ndev
,
915 "%s: DMA mapping failed\n", __func__
);
916 ndev
->stats
.tx_dropped
++;
917 dev_kfree_skb_any(skb
);
920 tx_desc
.addr
= skb
->data
;
921 tx_desc
.len
= skb_headlen(skb
);
923 skb_tx_timestamp(skb
);
924 netdev_sent_queue(priv
->ndev
, skb
->len
);
926 netsec_set_tx_de(priv
, dring
, &tx_ctrl
, &tx_desc
, skb
);
927 netsec_write(priv
, NETSEC_REG_NRM_TX_PKTCNT
, 1); /* submit another tx */
932 static void netsec_uninit_pkt_dring(struct netsec_priv
*priv
, int id
)
934 struct netsec_desc_ring
*dring
= &priv
->desc_ring
[id
];
935 struct netsec_desc
*desc
;
938 if (!dring
->vaddr
|| !dring
->desc
)
941 for (idx
= 0; idx
< DESC_NUM
; idx
++) {
942 desc
= &dring
->desc
[idx
];
946 dma_unmap_single(priv
->dev
, desc
->dma_addr
, desc
->len
,
947 id
== NETSEC_RING_RX
? DMA_FROM_DEVICE
:
949 dev_kfree_skb(desc
->skb
);
952 memset(dring
->desc
, 0, sizeof(struct netsec_desc
) * DESC_NUM
);
953 memset(dring
->vaddr
, 0, DESC_SZ
* DESC_NUM
);
959 if (id
== NETSEC_RING_TX
)
960 netdev_reset_queue(priv
->ndev
);
963 static void netsec_free_dring(struct netsec_priv
*priv
, int id
)
965 struct netsec_desc_ring
*dring
= &priv
->desc_ring
[id
];
968 dma_free_coherent(priv
->dev
, DESC_SZ
* DESC_NUM
,
969 dring
->vaddr
, dring
->desc_dma
);
977 static int netsec_alloc_dring(struct netsec_priv
*priv
, enum ring_id id
)
979 struct netsec_desc_ring
*dring
= &priv
->desc_ring
[id
];
982 dring
->vaddr
= dma_zalloc_coherent(priv
->dev
, DESC_SZ
* DESC_NUM
,
983 &dring
->desc_dma
, GFP_KERNEL
);
989 dring
->desc
= kcalloc(DESC_NUM
, sizeof(*dring
->desc
), GFP_KERNEL
);
997 netsec_free_dring(priv
, id
);
1002 static int netsec_setup_rx_dring(struct netsec_priv
*priv
)
1004 struct netsec_desc_ring
*dring
= &priv
->desc_ring
[NETSEC_RING_RX
];
1005 struct netsec_desc desc
;
1006 struct sk_buff
*skb
;
1009 desc
.len
= priv
->ndev
->mtu
+ 22;
1011 for (n
= 0; n
< DESC_NUM
; n
++) {
1012 skb
= netsec_alloc_skb(priv
, &desc
);
1014 netsec_uninit_pkt_dring(priv
, NETSEC_RING_RX
);
1017 netsec_set_rx_de(priv
, dring
, n
, &desc
, skb
);
1023 static int netsec_netdev_load_ucode_region(struct netsec_priv
*priv
, u32 reg
,
1024 u32 addr_h
, u32 addr_l
, u32 size
)
1026 u64 base
= (u64
)addr_h
<< 32 | addr_l
;
1027 void __iomem
*ucode
;
1030 ucode
= ioremap(base
, size
* sizeof(u32
));
1034 for (i
= 0; i
< size
; i
++)
1035 netsec_write(priv
, reg
, readl(ucode
+ i
* 4));
1041 static int netsec_netdev_load_microcode(struct netsec_priv
*priv
)
1043 u32 addr_h
, addr_l
, size
;
1046 addr_h
= readl(priv
->eeprom_base
+ NETSEC_EEPROM_HM_ME_ADDRESS_H
);
1047 addr_l
= readl(priv
->eeprom_base
+ NETSEC_EEPROM_HM_ME_ADDRESS_L
);
1048 size
= readl(priv
->eeprom_base
+ NETSEC_EEPROM_HM_ME_SIZE
);
1049 err
= netsec_netdev_load_ucode_region(priv
, NETSEC_REG_DMAC_HM_CMD_BUF
,
1050 addr_h
, addr_l
, size
);
1054 addr_h
= readl(priv
->eeprom_base
+ NETSEC_EEPROM_MH_ME_ADDRESS_H
);
1055 addr_l
= readl(priv
->eeprom_base
+ NETSEC_EEPROM_MH_ME_ADDRESS_L
);
1056 size
= readl(priv
->eeprom_base
+ NETSEC_EEPROM_MH_ME_SIZE
);
1057 err
= netsec_netdev_load_ucode_region(priv
, NETSEC_REG_DMAC_MH_CMD_BUF
,
1058 addr_h
, addr_l
, size
);
1063 addr_l
= readl(priv
->eeprom_base
+ NETSEC_EEPROM_PKT_ME_ADDRESS
);
1064 size
= readl(priv
->eeprom_base
+ NETSEC_EEPROM_PKT_ME_SIZE
);
1065 err
= netsec_netdev_load_ucode_region(priv
, NETSEC_REG_PKT_CMD_BUF
,
1066 addr_h
, addr_l
, size
);
1073 static int netsec_reset_hardware(struct netsec_priv
*priv
,
1079 /* stop DMA engines */
1080 if (!netsec_read(priv
, NETSEC_REG_ADDR_DIS_CORE
)) {
1081 netsec_write(priv
, NETSEC_REG_DMA_HM_CTRL
,
1082 NETSEC_DMA_CTRL_REG_STOP
);
1083 netsec_write(priv
, NETSEC_REG_DMA_MH_CTRL
,
1084 NETSEC_DMA_CTRL_REG_STOP
);
1086 while (netsec_read(priv
, NETSEC_REG_DMA_HM_CTRL
) &
1087 NETSEC_DMA_CTRL_REG_STOP
)
1090 while (netsec_read(priv
, NETSEC_REG_DMA_MH_CTRL
) &
1091 NETSEC_DMA_CTRL_REG_STOP
)
1095 netsec_write(priv
, NETSEC_REG_SOFT_RST
, NETSEC_SOFT_RST_REG_RESET
);
1096 netsec_write(priv
, NETSEC_REG_SOFT_RST
, NETSEC_SOFT_RST_REG_RUN
);
1097 netsec_write(priv
, NETSEC_REG_COM_INIT
, NETSEC_COM_INIT_REG_ALL
);
1099 while (netsec_read(priv
, NETSEC_REG_COM_INIT
) != 0)
1102 /* set desc_start addr */
1103 netsec_write(priv
, NETSEC_REG_NRM_RX_DESC_START_UP
,
1104 upper_32_bits(priv
->desc_ring
[NETSEC_RING_RX
].desc_dma
));
1105 netsec_write(priv
, NETSEC_REG_NRM_RX_DESC_START_LW
,
1106 lower_32_bits(priv
->desc_ring
[NETSEC_RING_RX
].desc_dma
));
1108 netsec_write(priv
, NETSEC_REG_NRM_TX_DESC_START_UP
,
1109 upper_32_bits(priv
->desc_ring
[NETSEC_RING_TX
].desc_dma
));
1110 netsec_write(priv
, NETSEC_REG_NRM_TX_DESC_START_LW
,
1111 lower_32_bits(priv
->desc_ring
[NETSEC_RING_TX
].desc_dma
));
1113 /* set normal tx dring ring config */
1114 netsec_write(priv
, NETSEC_REG_NRM_TX_CONFIG
,
1115 1 << NETSEC_REG_DESC_ENDIAN
);
1116 netsec_write(priv
, NETSEC_REG_NRM_RX_CONFIG
,
1117 1 << NETSEC_REG_DESC_ENDIAN
);
1120 err
= netsec_netdev_load_microcode(priv
);
1122 netif_err(priv
, probe
, priv
->ndev
,
1123 "%s: failed to load microcode (%d)\n",
1129 /* start DMA engines */
1130 netsec_write(priv
, NETSEC_REG_DMA_TMR_CTRL
, priv
->freq
/ 1000000 - 1);
1131 netsec_write(priv
, NETSEC_REG_ADDR_DIS_CORE
, 0);
1133 usleep_range(1000, 2000);
1135 if (!(netsec_read(priv
, NETSEC_REG_TOP_STATUS
) &
1136 NETSEC_TOP_IRQ_REG_CODE_LOAD_END
)) {
1137 netif_err(priv
, probe
, priv
->ndev
,
1138 "microengine start failed\n");
1141 netsec_write(priv
, NETSEC_REG_TOP_STATUS
,
1142 NETSEC_TOP_IRQ_REG_CODE_LOAD_END
);
1144 value
= NETSEC_PKT_CTRL_REG_MODE_NRM
;
1145 if (priv
->ndev
->mtu
> ETH_DATA_LEN
)
1146 value
|= NETSEC_PKT_CTRL_REG_EN_JUMBO
;
1148 /* change to normal mode */
1149 netsec_write(priv
, NETSEC_REG_DMA_MH_CTRL
, MH_CTRL__MODE_TRANS
);
1150 netsec_write(priv
, NETSEC_REG_PKT_CTRL
, value
);
1152 while ((netsec_read(priv
, NETSEC_REG_MODE_TRANS_COMP_STATUS
) &
1153 NETSEC_MODE_TRANS_COMP_IRQ_T2N
) == 0)
1156 /* clear any pending EMPTY/ERR irq status */
1157 netsec_write(priv
, NETSEC_REG_NRM_TX_STATUS
, ~0);
1159 /* Disable TX & RX intr */
1160 netsec_write(priv
, NETSEC_REG_INTEN_CLR
, ~0);
1165 static int netsec_start_gmac(struct netsec_priv
*priv
)
1167 struct phy_device
*phydev
= priv
->ndev
->phydev
;
1171 if (phydev
->speed
!= SPEED_1000
)
1172 value
= (NETSEC_GMAC_MCR_REG_CST
|
1173 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON
);
1175 if (netsec_mac_write(priv
, GMAC_REG_MCR
, value
))
1177 if (netsec_mac_write(priv
, GMAC_REG_BMR
,
1178 NETSEC_GMAC_BMR_REG_RESET
))
1181 /* Wait soft reset */
1182 usleep_range(1000, 5000);
1184 ret
= netsec_mac_read(priv
, GMAC_REG_BMR
, &value
);
1187 if (value
& NETSEC_GMAC_BMR_REG_SWR
)
1190 netsec_write(priv
, MAC_REG_DESC_SOFT_RST
, 1);
1191 if (netsec_wait_while_busy(priv
, MAC_REG_DESC_SOFT_RST
, 1))
1194 netsec_write(priv
, MAC_REG_DESC_INIT
, 1);
1195 if (netsec_wait_while_busy(priv
, MAC_REG_DESC_INIT
, 1))
1198 if (netsec_mac_write(priv
, GMAC_REG_BMR
,
1199 NETSEC_GMAC_BMR_REG_COMMON
))
1201 if (netsec_mac_write(priv
, GMAC_REG_RDLAR
,
1202 NETSEC_GMAC_RDLAR_REG_COMMON
))
1204 if (netsec_mac_write(priv
, GMAC_REG_TDLAR
,
1205 NETSEC_GMAC_TDLAR_REG_COMMON
))
1207 if (netsec_mac_write(priv
, GMAC_REG_MFFR
, 0x80000001))
1210 ret
= netsec_mac_update_to_phy_state(priv
);
1214 ret
= netsec_mac_read(priv
, GMAC_REG_OMR
, &value
);
1218 value
|= NETSEC_GMAC_OMR_REG_SR
;
1219 value
|= NETSEC_GMAC_OMR_REG_ST
;
1221 netsec_write(priv
, NETSEC_REG_NRM_RX_INTEN_CLR
, ~0);
1222 netsec_write(priv
, NETSEC_REG_NRM_TX_INTEN_CLR
, ~0);
1224 netsec_et_set_coalesce(priv
->ndev
, &priv
->et_coalesce
);
1226 if (netsec_mac_write(priv
, GMAC_REG_OMR
, value
))
1232 static int netsec_stop_gmac(struct netsec_priv
*priv
)
1237 ret
= netsec_mac_read(priv
, GMAC_REG_OMR
, &value
);
1240 value
&= ~NETSEC_GMAC_OMR_REG_SR
;
1241 value
&= ~NETSEC_GMAC_OMR_REG_ST
;
1243 /* disable all interrupts */
1244 netsec_write(priv
, NETSEC_REG_NRM_RX_INTEN_CLR
, ~0);
1245 netsec_write(priv
, NETSEC_REG_NRM_TX_INTEN_CLR
, ~0);
1247 return netsec_mac_write(priv
, GMAC_REG_OMR
, value
);
1250 static void netsec_phy_adjust_link(struct net_device
*ndev
)
1252 struct netsec_priv
*priv
= netdev_priv(ndev
);
1254 if (ndev
->phydev
->link
)
1255 netsec_start_gmac(priv
);
1257 netsec_stop_gmac(priv
);
1259 phy_print_status(ndev
->phydev
);
1262 static irqreturn_t
netsec_irq_handler(int irq
, void *dev_id
)
1264 struct netsec_priv
*priv
= dev_id
;
1265 u32 val
, status
= netsec_read(priv
, NETSEC_REG_TOP_STATUS
);
1266 unsigned long flags
;
1268 /* Disable interrupts */
1269 if (status
& NETSEC_IRQ_TX
) {
1270 val
= netsec_read(priv
, NETSEC_REG_NRM_TX_STATUS
);
1271 netsec_write(priv
, NETSEC_REG_NRM_TX_STATUS
, val
);
1273 if (status
& NETSEC_IRQ_RX
) {
1274 val
= netsec_read(priv
, NETSEC_REG_NRM_RX_STATUS
);
1275 netsec_write(priv
, NETSEC_REG_NRM_RX_STATUS
, val
);
1278 spin_lock_irqsave(&priv
->reglock
, flags
);
1279 netsec_write(priv
, NETSEC_REG_INTEN_CLR
, NETSEC_IRQ_RX
| NETSEC_IRQ_TX
);
1280 spin_unlock_irqrestore(&priv
->reglock
, flags
);
1282 napi_schedule(&priv
->napi
);
1287 static int netsec_netdev_open(struct net_device
*ndev
)
1289 struct netsec_priv
*priv
= netdev_priv(ndev
);
1292 pm_runtime_get_sync(priv
->dev
);
1294 ret
= netsec_setup_rx_dring(priv
);
1296 netif_err(priv
, probe
, priv
->ndev
,
1297 "%s: fail setup ring\n", __func__
);
1301 ret
= request_irq(priv
->ndev
->irq
, netsec_irq_handler
,
1302 IRQF_SHARED
, "netsec", priv
);
1304 netif_err(priv
, drv
, priv
->ndev
, "request_irq failed\n");
1308 if (dev_of_node(priv
->dev
)) {
1309 if (!of_phy_connect(priv
->ndev
, priv
->phy_np
,
1310 netsec_phy_adjust_link
, 0,
1311 priv
->phy_interface
)) {
1312 netif_err(priv
, link
, priv
->ndev
, "missing PHY\n");
1317 ret
= phy_connect_direct(priv
->ndev
, priv
->phydev
,
1318 netsec_phy_adjust_link
,
1319 priv
->phy_interface
);
1321 netif_err(priv
, link
, priv
->ndev
,
1322 "phy_connect_direct() failed (%d)\n", ret
);
1327 phy_start(ndev
->phydev
);
1329 netsec_start_gmac(priv
);
1330 napi_enable(&priv
->napi
);
1331 netif_start_queue(ndev
);
1333 /* Enable TX+RX intr. */
1334 netsec_write(priv
, NETSEC_REG_INTEN_SET
, NETSEC_IRQ_RX
| NETSEC_IRQ_TX
);
1338 free_irq(priv
->ndev
->irq
, priv
);
1340 netsec_uninit_pkt_dring(priv
, NETSEC_RING_RX
);
1342 pm_runtime_put_sync(priv
->dev
);
1346 static int netsec_netdev_stop(struct net_device
*ndev
)
1349 struct netsec_priv
*priv
= netdev_priv(ndev
);
1351 netif_stop_queue(priv
->ndev
);
1354 napi_disable(&priv
->napi
);
1356 netsec_write(priv
, NETSEC_REG_INTEN_CLR
, ~0);
1357 netsec_stop_gmac(priv
);
1359 free_irq(priv
->ndev
->irq
, priv
);
1361 netsec_uninit_pkt_dring(priv
, NETSEC_RING_TX
);
1362 netsec_uninit_pkt_dring(priv
, NETSEC_RING_RX
);
1364 phy_stop(ndev
->phydev
);
1365 phy_disconnect(ndev
->phydev
);
1367 ret
= netsec_reset_hardware(priv
, false);
1369 pm_runtime_put_sync(priv
->dev
);
1374 static int netsec_netdev_init(struct net_device
*ndev
)
1376 struct netsec_priv
*priv
= netdev_priv(ndev
);
1380 ret
= netsec_alloc_dring(priv
, NETSEC_RING_TX
);
1384 ret
= netsec_alloc_dring(priv
, NETSEC_RING_RX
);
1388 /* set phy power down */
1389 data
= netsec_phy_read(priv
->mii_bus
, priv
->phy_addr
, MII_BMCR
) |
1391 netsec_phy_write(priv
->mii_bus
, priv
->phy_addr
, MII_BMCR
, data
);
1393 ret
= netsec_reset_hardware(priv
, true);
1399 netsec_free_dring(priv
, NETSEC_RING_RX
);
1401 netsec_free_dring(priv
, NETSEC_RING_TX
);
1405 static void netsec_netdev_uninit(struct net_device
*ndev
)
1407 struct netsec_priv
*priv
= netdev_priv(ndev
);
1409 netsec_free_dring(priv
, NETSEC_RING_RX
);
1410 netsec_free_dring(priv
, NETSEC_RING_TX
);
1413 static int netsec_netdev_set_features(struct net_device
*ndev
,
1414 netdev_features_t features
)
1416 struct netsec_priv
*priv
= netdev_priv(ndev
);
1418 priv
->rx_cksum_offload_flag
= !!(features
& NETIF_F_RXCSUM
);
1423 static int netsec_netdev_ioctl(struct net_device
*ndev
, struct ifreq
*ifr
,
1426 return phy_mii_ioctl(ndev
->phydev
, ifr
, cmd
);
1429 static const struct net_device_ops netsec_netdev_ops
= {
1430 .ndo_init
= netsec_netdev_init
,
1431 .ndo_uninit
= netsec_netdev_uninit
,
1432 .ndo_open
= netsec_netdev_open
,
1433 .ndo_stop
= netsec_netdev_stop
,
1434 .ndo_start_xmit
= netsec_netdev_start_xmit
,
1435 .ndo_set_features
= netsec_netdev_set_features
,
1436 .ndo_set_mac_address
= eth_mac_addr
,
1437 .ndo_validate_addr
= eth_validate_addr
,
1438 .ndo_do_ioctl
= netsec_netdev_ioctl
,
1441 static int netsec_of_probe(struct platform_device
*pdev
,
1442 struct netsec_priv
*priv
, u32
*phy_addr
)
1444 priv
->phy_np
= of_parse_phandle(pdev
->dev
.of_node
, "phy-handle", 0);
1445 if (!priv
->phy_np
) {
1446 dev_err(&pdev
->dev
, "missing required property 'phy-handle'\n");
1450 *phy_addr
= of_mdio_parse_addr(&pdev
->dev
, priv
->phy_np
);
1452 priv
->clk
= devm_clk_get(&pdev
->dev
, NULL
); /* get by 'phy_ref_clk' */
1453 if (IS_ERR(priv
->clk
)) {
1454 dev_err(&pdev
->dev
, "phy_ref_clk not found\n");
1455 return PTR_ERR(priv
->clk
);
1457 priv
->freq
= clk_get_rate(priv
->clk
);
1462 static int netsec_acpi_probe(struct platform_device
*pdev
,
1463 struct netsec_priv
*priv
, u32
*phy_addr
)
1467 if (!IS_ENABLED(CONFIG_ACPI
))
1470 ret
= device_property_read_u32(&pdev
->dev
, "phy-channel", phy_addr
);
1473 "missing required property 'phy-channel'\n");
1477 ret
= device_property_read_u32(&pdev
->dev
,
1478 "socionext,phy-clock-frequency",
1482 "missing required property 'socionext,phy-clock-frequency'\n");
1486 static void netsec_unregister_mdio(struct netsec_priv
*priv
)
1488 struct phy_device
*phydev
= priv
->phydev
;
1490 if (!dev_of_node(priv
->dev
) && phydev
) {
1491 phy_device_remove(phydev
);
1492 phy_device_free(phydev
);
1495 mdiobus_unregister(priv
->mii_bus
);
1498 static int netsec_register_mdio(struct netsec_priv
*priv
, u32 phy_addr
)
1500 struct mii_bus
*bus
;
1503 bus
= devm_mdiobus_alloc(priv
->dev
);
1507 snprintf(bus
->id
, MII_BUS_ID_SIZE
, "%s", dev_name(priv
->dev
));
1509 bus
->name
= "SNI NETSEC MDIO";
1510 bus
->read
= netsec_phy_read
;
1511 bus
->write
= netsec_phy_write
;
1512 bus
->parent
= priv
->dev
;
1513 priv
->mii_bus
= bus
;
1515 if (dev_of_node(priv
->dev
)) {
1516 struct device_node
*mdio_node
, *parent
= dev_of_node(priv
->dev
);
1518 mdio_node
= of_get_child_by_name(parent
, "mdio");
1522 /* older f/w doesn't populate the mdio subnode,
1523 * allow relaxed upgrade of f/w in due time.
1525 dev_info(priv
->dev
, "Upgrade f/w for mdio subnode!\n");
1528 ret
= of_mdiobus_register(bus
, parent
);
1529 of_node_put(mdio_node
);
1532 dev_err(priv
->dev
, "mdiobus register err(%d)\n", ret
);
1536 /* Mask out all PHYs from auto probing. */
1538 ret
= mdiobus_register(bus
);
1540 dev_err(priv
->dev
, "mdiobus register err(%d)\n", ret
);
1544 priv
->phydev
= get_phy_device(bus
, phy_addr
, false);
1545 if (IS_ERR(priv
->phydev
)) {
1546 ret
= PTR_ERR(priv
->phydev
);
1547 dev_err(priv
->dev
, "get_phy_device err(%d)\n", ret
);
1548 priv
->phydev
= NULL
;
1552 ret
= phy_device_register(priv
->phydev
);
1554 mdiobus_unregister(bus
);
1556 "phy_device_register err(%d)\n", ret
);
1563 static int netsec_probe(struct platform_device
*pdev
)
1565 struct resource
*mmio_res
, *eeprom_res
, *irq_res
;
1566 u8
*mac
, macbuf
[ETH_ALEN
];
1567 struct netsec_priv
*priv
;
1568 u32 hw_ver
, phy_addr
= 0;
1569 struct net_device
*ndev
;
1572 mmio_res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1574 dev_err(&pdev
->dev
, "No MMIO resource found.\n");
1578 eeprom_res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
1580 dev_info(&pdev
->dev
, "No EEPROM resource found.\n");
1584 irq_res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
1586 dev_err(&pdev
->dev
, "No IRQ resource found.\n");
1590 ndev
= alloc_etherdev(sizeof(*priv
));
1594 priv
= netdev_priv(ndev
);
1596 spin_lock_init(&priv
->reglock
);
1597 SET_NETDEV_DEV(ndev
, &pdev
->dev
);
1598 platform_set_drvdata(pdev
, priv
);
1599 ndev
->irq
= irq_res
->start
;
1600 priv
->dev
= &pdev
->dev
;
1603 priv
->msg_enable
= NETIF_MSG_TX_ERR
| NETIF_MSG_HW
| NETIF_MSG_DRV
|
1604 NETIF_MSG_LINK
| NETIF_MSG_PROBE
;
1606 priv
->phy_interface
= device_get_phy_mode(&pdev
->dev
);
1607 if ((int)priv
->phy_interface
< 0) {
1608 dev_err(&pdev
->dev
, "missing required property 'phy-mode'\n");
1613 priv
->ioaddr
= devm_ioremap(&pdev
->dev
, mmio_res
->start
,
1614 resource_size(mmio_res
));
1615 if (!priv
->ioaddr
) {
1616 dev_err(&pdev
->dev
, "devm_ioremap() failed\n");
1621 priv
->eeprom_base
= devm_ioremap(&pdev
->dev
, eeprom_res
->start
,
1622 resource_size(eeprom_res
));
1623 if (!priv
->eeprom_base
) {
1624 dev_err(&pdev
->dev
, "devm_ioremap() failed for EEPROM\n");
1629 mac
= device_get_mac_address(&pdev
->dev
, macbuf
, sizeof(macbuf
));
1631 ether_addr_copy(ndev
->dev_addr
, mac
);
1633 if (priv
->eeprom_base
&&
1634 (!mac
|| !is_valid_ether_addr(ndev
->dev_addr
))) {
1635 void __iomem
*macp
= priv
->eeprom_base
+
1636 NETSEC_EEPROM_MAC_ADDRESS
;
1638 ndev
->dev_addr
[0] = readb(macp
+ 3);
1639 ndev
->dev_addr
[1] = readb(macp
+ 2);
1640 ndev
->dev_addr
[2] = readb(macp
+ 1);
1641 ndev
->dev_addr
[3] = readb(macp
+ 0);
1642 ndev
->dev_addr
[4] = readb(macp
+ 7);
1643 ndev
->dev_addr
[5] = readb(macp
+ 6);
1646 if (!is_valid_ether_addr(ndev
->dev_addr
)) {
1647 dev_warn(&pdev
->dev
, "No MAC address found, using random\n");
1648 eth_hw_addr_random(ndev
);
1651 if (dev_of_node(&pdev
->dev
))
1652 ret
= netsec_of_probe(pdev
, priv
, &phy_addr
);
1654 ret
= netsec_acpi_probe(pdev
, priv
, &phy_addr
);
1658 priv
->phy_addr
= phy_addr
;
1661 dev_err(&pdev
->dev
, "missing PHY reference clock frequency\n");
1666 /* default for throughput */
1667 priv
->et_coalesce
.rx_coalesce_usecs
= 500;
1668 priv
->et_coalesce
.rx_max_coalesced_frames
= 8;
1669 priv
->et_coalesce
.tx_coalesce_usecs
= 500;
1670 priv
->et_coalesce
.tx_max_coalesced_frames
= 8;
1672 ret
= device_property_read_u32(&pdev
->dev
, "max-frame-size",
1675 ndev
->max_mtu
= ETH_DATA_LEN
;
1677 /* runtime_pm coverage just for probe, open/close also cover it */
1678 pm_runtime_enable(&pdev
->dev
);
1679 pm_runtime_get_sync(&pdev
->dev
);
1681 hw_ver
= netsec_read(priv
, NETSEC_REG_F_TAIKI_VER
);
1682 /* this driver only supports F_TAIKI style NETSEC */
1683 if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver
) !=
1684 NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI
)) {
1689 dev_info(&pdev
->dev
, "hardware revision %d.%d\n",
1690 hw_ver
>> 16, hw_ver
& 0xffff);
1692 netif_napi_add(ndev
, &priv
->napi
, netsec_napi_poll
, NAPI_POLL_WEIGHT
);
1694 ndev
->netdev_ops
= &netsec_netdev_ops
;
1695 ndev
->ethtool_ops
= &netsec_ethtool_ops
;
1697 ndev
->features
|= NETIF_F_HIGHDMA
| NETIF_F_RXCSUM
| NETIF_F_GSO
|
1698 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
;
1699 ndev
->hw_features
= ndev
->features
;
1701 priv
->rx_cksum_offload_flag
= true;
1703 ret
= netsec_register_mdio(priv
, phy_addr
);
1707 if (dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(40)))
1708 dev_warn(&pdev
->dev
, "Failed to set DMA mask\n");
1710 ret
= register_netdev(ndev
);
1712 netif_err(priv
, probe
, ndev
, "register_netdev() failed\n");
1716 pm_runtime_put_sync(&pdev
->dev
);
1720 netsec_unregister_mdio(priv
);
1722 netif_napi_del(&priv
->napi
);
1724 pm_runtime_put_sync(&pdev
->dev
);
1725 pm_runtime_disable(&pdev
->dev
);
1728 dev_err(&pdev
->dev
, "init failed\n");
1733 static int netsec_remove(struct platform_device
*pdev
)
1735 struct netsec_priv
*priv
= platform_get_drvdata(pdev
);
1737 unregister_netdev(priv
->ndev
);
1739 netsec_unregister_mdio(priv
);
1741 netif_napi_del(&priv
->napi
);
1743 pm_runtime_disable(&pdev
->dev
);
1744 free_netdev(priv
->ndev
);
1750 static int netsec_runtime_suspend(struct device
*dev
)
1752 struct netsec_priv
*priv
= dev_get_drvdata(dev
);
1754 netsec_write(priv
, NETSEC_REG_CLK_EN
, 0);
1756 clk_disable_unprepare(priv
->clk
);
1761 static int netsec_runtime_resume(struct device
*dev
)
1763 struct netsec_priv
*priv
= dev_get_drvdata(dev
);
1765 clk_prepare_enable(priv
->clk
);
1767 netsec_write(priv
, NETSEC_REG_CLK_EN
, NETSEC_CLK_EN_REG_DOM_D
|
1768 NETSEC_CLK_EN_REG_DOM_C
|
1769 NETSEC_CLK_EN_REG_DOM_G
);
1774 static const struct dev_pm_ops netsec_pm_ops
= {
1775 SET_RUNTIME_PM_OPS(netsec_runtime_suspend
, netsec_runtime_resume
, NULL
)
1778 static const struct of_device_id netsec_dt_ids
[] = {
1779 { .compatible
= "socionext,synquacer-netsec" },
1782 MODULE_DEVICE_TABLE(of
, netsec_dt_ids
);
1785 static const struct acpi_device_id netsec_acpi_ids
[] = {
1789 MODULE_DEVICE_TABLE(acpi
, netsec_acpi_ids
);
1792 static struct platform_driver netsec_driver
= {
1793 .probe
= netsec_probe
,
1794 .remove
= netsec_remove
,
1797 .pm
= &netsec_pm_ops
,
1798 .of_match_table
= netsec_dt_ids
,
1799 .acpi_match_table
= ACPI_PTR(netsec_acpi_ids
),
1802 module_platform_driver(netsec_driver
);
1804 MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
1805 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
1806 MODULE_DESCRIPTION("NETSEC Ethernet driver");
1807 MODULE_LICENSE("GPL");