// SPDX-License-Identifier: GPL-2.0+

#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <net/page_pool.h>
#include <net/ip6_checksum.h>
#define NETSEC_REG_SOFT_RST 0x104
#define NETSEC_REG_COM_INIT 0x120

#define NETSEC_REG_TOP_STATUS 0x200
#define NETSEC_IRQ_RX BIT(1)
#define NETSEC_IRQ_TX BIT(0)

#define NETSEC_REG_TOP_INTEN 0x204
#define NETSEC_REG_INTEN_SET 0x234
#define NETSEC_REG_INTEN_CLR 0x238

#define NETSEC_REG_NRM_TX_STATUS 0x400
#define NETSEC_REG_NRM_TX_INTEN 0x404
#define NETSEC_REG_NRM_TX_INTEN_SET 0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
#define NRM_TX_ST_NTOWNR BIT(17)
#define NRM_TX_ST_TR_ERR BIT(16)
#define NRM_TX_ST_TXDONE BIT(15)
#define NRM_TX_ST_TMREXP BIT(14)

#define NETSEC_REG_NRM_RX_STATUS 0x440
#define NETSEC_REG_NRM_RX_INTEN 0x444
#define NETSEC_REG_NRM_RX_INTEN_SET 0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
#define NRM_RX_ST_RC_ERR BIT(16)
#define NRM_RX_ST_PKTCNT BIT(15)
#define NRM_RX_ST_TMREXP BIT(14)

#define NETSEC_REG_PKT_CMD_BUF 0xd0

#define NETSEC_REG_CLK_EN 0x100

#define NETSEC_REG_PKT_CTRL 0x140

#define NETSEC_REG_DMA_TMR_CTRL 0x20c
#define NETSEC_REG_F_TAIKI_MC_VER 0x22c
#define NETSEC_REG_F_TAIKI_VER 0x230
#define NETSEC_REG_DMA_HM_CTRL 0x214
#define NETSEC_REG_DMA_MH_CTRL 0x220
#define NETSEC_REG_ADDR_DIS_CORE 0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c

#define NETSEC_REG_NRM_TX_PKTCNT 0x410

#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418

#define NETSEC_REG_NRM_TX_TMR 0x41c

#define NETSEC_REG_NRM_RX_PKTCNT 0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460

#define NETSEC_REG_NRM_RX_TMR 0x45c

#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448

#define NETSEC_REG_NRM_TX_CONFIG 0x430
#define NETSEC_REG_NRM_RX_CONFIG 0x470

#define MAC_REG_STATUS 0x1024
#define MAC_REG_DATA 0x11c0
#define MAC_REG_CMD 0x11c4
#define MAC_REG_FLOW_TH 0x11cc
#define MAC_REG_INTF_SEL 0x11d4
#define MAC_REG_DESC_INIT 0x11fc
#define MAC_REG_DESC_SOFT_RST 0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500

#define GMAC_REG_MCR 0x0000
#define GMAC_REG_MFFR 0x0004
#define GMAC_REG_GAR 0x0010
#define GMAC_REG_GDR 0x0014
#define GMAC_REG_FCR 0x0018
#define GMAC_REG_BMR 0x1000
#define GMAC_REG_RDLAR 0x100c
#define GMAC_REG_TDLAR 0x1010
#define GMAC_REG_OMR 0x1018

#define MHZ(n) ((n) * 1000 * 1000)

#define NETSEC_TX_SHIFT_OWN_FIELD 31
#define NETSEC_TX_SHIFT_LD_FIELD 30
#define NETSEC_TX_SHIFT_DRID_FIELD 24
#define NETSEC_TX_SHIFT_PT_FIELD 21
#define NETSEC_TX_SHIFT_TDRID_FIELD 16
#define NETSEC_TX_SHIFT_CC_FIELD 15
#define NETSEC_TX_SHIFT_FS_FIELD 9
#define NETSEC_TX_LAST 8
#define NETSEC_TX_SHIFT_CO 7
#define NETSEC_TX_SHIFT_SO 6
#define NETSEC_TX_SHIFT_TRS_FIELD 4

#define NETSEC_RX_PKT_OWN_FIELD 31
#define NETSEC_RX_PKT_LD_FIELD 30
#define NETSEC_RX_PKT_SDRID_FIELD 24
#define NETSEC_RX_PKT_FR_FIELD 23
#define NETSEC_RX_PKT_ER_FIELD 21
#define NETSEC_RX_PKT_ERR_FIELD 16
#define NETSEC_RX_PKT_TDRID_FIELD 12
#define NETSEC_RX_PKT_FS_FIELD 9
#define NETSEC_RX_PKT_LS_FIELD 8
#define NETSEC_RX_PKT_CO_FIELD 6

#define NETSEC_RX_PKT_ERR_MASK 3

#define NETSEC_MAX_TX_PKT_LEN 1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018

#define NETSEC_RING_GMAC 15
#define NETSEC_RING_MAX 2

#define NETSEC_TCP_SEG_LEN_MAX 1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960

#define NETSEC_RX_CKSUM_NOTAVAIL 0
#define NETSEC_RX_CKSUM_OK 1
#define NETSEC_RX_CKSUM_NG 2

#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)

#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)

#define NETSEC_INT_PKTCNT_MAX 2047

#define NETSEC_FLOW_START_TH_MAX 95
#define NETSEC_FLOW_STOP_TH_MAX 95
#define NETSEC_FLOW_PAUSE_TIME_MIN 5

#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f

#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)

#define NETSEC_CLK_EN_REG_DOM_G BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D BIT(0)

#define NETSEC_COM_INIT_REG_DB BIT(2)
#define NETSEC_COM_INIT_REG_CLS BIT(1)
#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
				 NETSEC_COM_INIT_REG_DB)

#define NETSEC_SOFT_RST_REG_RESET 0
#define NETSEC_SOFT_RST_REG_RUN BIT(31)

#define NETSEC_DMA_CTRL_REG_STOP 1
#define MH_CTRL__MODE_TRANS BIT(20)

#define NETSEC_GMAC_CMD_ST_READ 0
#define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY BIT(31)

#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
#define NETSEC_GMAC_BMR_REG_RESET 0x00020181
#define NETSEC_GMAC_BMR_REG_SWR 0x00000001

#define NETSEC_GMAC_OMR_REG_ST BIT(13)
#define NETSEC_GMAC_OMR_REG_SR BIT(1)

#define NETSEC_GMAC_MCR_REG_IBN BIT(30)
#define NETSEC_GMAC_MCR_REG_CST BIT(25)
#define NETSEC_GMAC_MCR_REG_JE BIT(20)
#define NETSEC_MCR_PS BIT(15)
#define NETSEC_GMAC_MCR_REG_FES BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c

#define NETSEC_FCR_RFE BIT(2)
#define NETSEC_FCR_TFE BIT(1)

#define NETSEC_GMAC_GAR_REG_GW BIT(1)
#define NETSEC_GMAC_GAR_REG_GB BIT(0)

#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
#define GMAC_REG_SHIFT_CR_GAR 2

#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5
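/* The CR_xx_yy_MHZ values select the GAR clock-range (CR) field, i.e. the
 * MDC divider used for MDIO accesses, based on the bus clock rate held in
 * priv->freq (see netsec_clk_type() below).
 */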
#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000

#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000

#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
#define NETSEC_REG_DESC_TMR_MODE 4
#define NETSEC_REG_DESC_ENDIAN 0

#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
#define NETSEC_MAC_DESC_INIT_REG_INIT 1

#define NETSEC_EEPROM_MAC_ADDRESS 0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
#define NETSEC_EEPROM_HM_ME_SIZE 0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
#define NETSEC_EEPROM_MH_ME_SIZE 0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
			       NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
#define DESC_SZ	sizeof(struct netsec_de)

#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)

#define NETSEC_XDP_PASS 0
#define NETSEC_XDP_CONSUMED BIT(0)
#define NETSEC_XDP_TX BIT(1)
#define NETSEC_XDP_REDIR BIT(2)
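/* The NETSEC_XDP_* values are verdict flags rather than an enum: during a
 * NAPI poll they are OR-ed into one mask so netsec_finalize_xdp_rx() can do
 * the redirect flush and the XDP TX doorbell write once per batch.
 */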
		struct xdp_frame *xdpf;

struct netsec_desc_ring {
	struct netsec_desc *desc;
	u16 xdp_xmit; /* netsec_xdp_xmit packets */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	spinlock_t lock; /* XDP tx queue locking */
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
	struct bpf_prog *xdp_prog;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
	struct net_device *ndev;
	struct device_node *phy_np;
	struct phy_device *phydev;
	struct mii_bus *mii_bus;
	void __iomem *ioaddr;
	void __iomem *eeprom_base;
	bool rx_cksum_offload_flag;
struct netsec_de { /* Netsec Descriptor layout */
	u32 data_buf_addr_up;
	u32 data_buf_addr_lw;

struct netsec_tx_pkt_ctrl {
	bool tcp_seg_offload_flag;
	bool cksum_offload_flag;

struct netsec_rx_pkt_info {
static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
{
	writel(val, priv->ioaddr + reg_addr);
}

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
{
	return readl(priv->ioaddr + reg_addr);
}
/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC 1000
#define TIMEOUT_SECONDARY_MS_MAC 100

static u32 netsec_clk_type(u32 freq)
{
	if (freq < MHZ(35))
		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
	if (freq < MHZ(60))
		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
	if (freq < MHZ(100))
		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
	if (freq < MHZ(150))
		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
	if (freq < MHZ(250))
		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;

	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
}
static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
	u32 timeout = TIMEOUT_SPINS_MAC;

	while (--timeout && netsec_read(priv, addr) & mask)

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	while (--timeout && netsec_read(priv, addr) & mask)
		usleep_range(1000, 2000);

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
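/* Both busy-wait helpers in this file share the same two-phase pattern: up to
 * TIMEOUT_SPINS_MAC quick polls first, then sleeping polls of 1-2 ms for up
 * to TIMEOUT_SECONDARY_MS_MAC iterations before warning about a timeout.
 */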
static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
	netsec_write(priv, MAC_REG_DATA, value);
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
	return netsec_wait_while_busy(priv,
				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
	ret = netsec_wait_while_busy(priv,
				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);

	*read = netsec_read(priv, MAC_REG_DATA);
static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
	u32 timeout = TIMEOUT_SPINS_MAC;

		ret = netsec_mac_read(priv, addr, &data);
	} while (--timeout && (data & mask));

	timeout = TIMEOUT_SECONDARY_MS_MAC;
		usleep_range(1000, 2000);
		ret = netsec_mac_read(priv, addr, &data);
	} while (--timeout && (data & mask));

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
	struct phy_device *phydev = priv->ndev->phydev;

	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

	if (phydev->speed != SPEED_1000)
		value |= NETSEC_MCR_PS;

	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		value |= NETSEC_GMAC_MCR_REG_FES;

	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		value |= NETSEC_GMAC_MCR_REG_IBN;

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);

static int netsec_phy_write(struct mii_bus *bus,
			    int phy_addr, int reg, u16 val)
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
	if (netsec_mac_write(priv, GMAC_REG_GAR,
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))

	status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					    NETSEC_GMAC_GAR_REG_GB);

	/* Developerbox implements RTL8211E PHY and there is
	 * a compatibility problem with F_GMAC4.
	 * RTL8211E expects MDC clock must be kept toggling for several
	 * clock cycle with MDIO high before entering the IDLE state.
	 * To meet this requirement, netsec driver needs to issue dummy
	 * read(e.g. read PHYID1(offset 0x2) register) right after write.
	 */
	netsec_phy_read(bus, phy_addr, MII_PHYSID1);
static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))

	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					 NETSEC_GMAC_GAR_REG_GB);

	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
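/* MDIO accesses go through the GMAC indirection registers: GAR carries the
 * PHY address, register number, clock-range field and the busy (GB) bit,
 * while GDR holds the 16-bit value written to or read back from the PHY.
 */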
/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
				  struct ethtool_drvinfo *info)
	strlcpy(info->driver, "netsec", sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));
static int netsec_et_get_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
	struct netsec_priv *priv = netdev_priv(net_device);

	*et_coalesce = priv->et_coalesce;
static int netsec_et_set_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
	struct netsec_priv *priv = netdev_priv(net_device);

	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
		     priv->et_coalesce.tx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
		     priv->et_coalesce.tx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
		     priv->et_coalesce.rx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
		     priv->et_coalesce.rx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
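/* Both coalescing thresholds are armed at once: the packet-count and timer
 * interrupt sources are enabled together, so an IRQ fires as soon as either
 * the frame budget or the usecs timer expires, whichever happens first.
 */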
static u32 netsec_et_get_msglevel(struct net_device *dev)
	struct netsec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
	struct netsec_priv *priv = netdev_priv(dev);

	priv->msg_enable = datum;
static const struct ethtool_ops netsec_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo		= netsec_et_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= netsec_et_get_coalesce,
	.set_coalesce		= netsec_et_set_coalesce,
	.get_msglevel		= netsec_et_get_msglevel,
	.set_msglevel		= netsec_et_set_msglevel,
/************* NETDEV_OPS FOLLOW *************/

static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
			     const struct netsec_desc *desc)
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
		   (1 << NETSEC_RX_PKT_FS_FIELD) |
		   (1 << NETSEC_RX_PKT_LS_FIELD);

	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;

	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
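/* dring->desc[] is a CPU-side shadow of the DMA descriptor ring: it keeps the
 * kernel virtual address, DMA mapping and length of each buffer so that the
 * completion and teardown paths can unmap and free buffers without having to
 * interpret descriptors the hardware may still own.
 */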
static bool netsec_clean_tx_dring(struct netsec_priv *priv)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_de *entry;
	int tail = dring->tail;

	spin_lock(&dring->lock);

	entry = dring->vaddr + DESC_SZ * tail;

	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
		struct netsec_desc *desc;

		desc = &dring->desc[tail];
		eop = (entry->attr >> NETSEC_TX_LAST) & 1;

		/* if buf_type is either TYPE_NETSEC_SKB or
		 * TYPE_NETSEC_XDP_NDO we mapped it
		 */
		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,

		if (desc->buf_type == TYPE_NETSEC_SKB) {
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);
			bytes += desc->xdpf->len;
			xdp_return_frame(desc->xdpf);

		/* clean up so netsec_uninit_pkt_dring() won't free the skb
		 */
		*desc = (struct netsec_desc){};

		/* entry->attr is not going to be accessed by the NIC until
		 * netsec_set_tx_de() is called. No need for a dma_wmb() here
		 */
		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;

		entry = dring->vaddr + DESC_SZ * tail;

	spin_unlock(&dring->lock);

	/* reading the register clears the irq */
	netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

	priv->ndev->stats.tx_packets += cnt;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, cnt, bytes);
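/* TX completion here pairs with the netdev_sent_queue() calls in the transmit
 * paths: byte-queue-limit accounting is updated with the number of frames and
 * bytes actually reaped from the ring in this pass.
 */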
static void netsec_process_tx(struct netsec_priv *priv)
	struct net_device *ndev = priv->ndev;

	cleaned = netsec_clean_tx_dring(priv);

	if (cleaned && netif_queue_stopped(ndev)) {
		/* Make sure we update the value, anyone stopping the queue
		 * after this will read the proper consumer idx
		 */
		netif_wake_queue(ndev);
static void *netsec_alloc_rx_data(struct netsec_priv *priv,
				  dma_addr_t *dma_handle, u16 *desc_len)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];

	page = page_pool_dev_alloc_pages(dring->page_pool);

	/* We allocate the same buffer length for XDP and non-XDP cases.
	 * page_pool API will map the whole page, skip what's needed for
	 * network payloads and/or XDP
	 */
	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
	/* Make sure the incoming payload fits in the page for XDP and non-XDP
	 * cases and reserve enough space for headroom + skb_shared_info
	 */
	*desc_len = NETSEC_RX_BUF_SIZE;

	return page_address(page);
static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];

		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
		netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);

static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
	if (xdp_res & NETSEC_XDP_REDIR)

	if (xdp_res & NETSEC_XDP_TX)
		netsec_xdp_ring_tx_db(priv, pkts);
static void netsec_set_tx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring,
			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
			     const struct netsec_desc *desc, void *buf)
	int idx = dring->head;
	struct netsec_de *de;

	de = dring->vaddr + (DESC_SZ * idx);

	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
	       (1 << NETSEC_TX_LAST) |
	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;

	dring->desc[idx] = *desc;
	if (desc->buf_type == TYPE_NETSEC_SKB)
		dring->desc[idx].skb = buf;
	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
		dring->desc[idx].xdpf = buf;

	/* move head ahead */
	dring->head = (dring->head + 1) % DESC_NUM;
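/* The OWN bit in attr is what hands a descriptor over to the hardware, so the
 * buffer address and length fields have to be written (and ordered with a DMA
 * write barrier) before the final attr store makes the slot visible.
 */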
/* The current driver only supports 1 Txq, this should run under spin_lock() */
static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
				struct xdp_frame *xdpf, bool is_ndo)
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct page *page = virt_to_page(xdpf->data);
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	dma_addr_t dma_handle;

	if (tx_ring->head >= tx_ring->tail)
		filled = tx_ring->head - tx_ring->tail;
		filled = tx_ring->head + DESC_NUM - tx_ring->tail;

	if (DESC_NUM - filled <= 1)
		return NETSEC_XDP_CONSUMED;

		/* this is for ndo_xdp_xmit, the buffer needs mapping before
		 */
		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
		if (dma_mapping_error(priv->dev, dma_handle))
			return NETSEC_XDP_CONSUMED;
		tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
		/* This is the device Rx buffer from page_pool. No need to remap
		 * just sync and send it
		 */
		struct netsec_desc_ring *rx_ring =
			&priv->desc_ring[NETSEC_RING_RX];
		enum dma_data_direction dma_dir =
			page_pool_get_dma_dir(rx_ring->page_pool);

		dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
		tx_desc.buf_type = TYPE_NETSEC_XDP_TX;

	tx_desc.dma_addr = dma_handle;
	tx_desc.addr = xdpf->data;
	tx_desc.len = xdpf->len;

	netdev_sent_queue(priv->ndev, xdpf->len);
	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);

	return NETSEC_XDP_TX;
static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);

		return NETSEC_XDP_CONSUMED;

	spin_lock(&tx_ring->lock);
	ret = netsec_xdp_queue_one(priv, xdpf, false);
	spin_unlock(&tx_ring->lock);
static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
			  struct xdp_buff *xdp)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	unsigned int len = xdp->data_end - xdp->data;
	u32 ret = NETSEC_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);

		ret = NETSEC_XDP_PASS;

		ret = netsec_xdp_xmit_back(priv, xdp);
		if (ret != NETSEC_XDP_TX)
			page_pool_put_page(dring->page_pool,
					   virt_to_head_page(xdp->data), len,

		err = xdp_do_redirect(priv->ndev, xdp, prog);
			ret = NETSEC_XDP_REDIR;
			ret = NETSEC_XDP_CONSUMED;
			page_pool_put_page(dring->page_pool,
					   virt_to_head_page(xdp->data), len,

		bpf_warn_invalid_xdp_action(act);
		trace_xdp_exception(priv->ndev, prog, act);
		/* fall through -- handle aborts by dropping packet */
		ret = NETSEC_XDP_CONSUMED;
		page_pool_put_page(dring->page_pool,
				   virt_to_head_page(xdp->data), len, true);
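/* Every non-PASS outcome that cannot hand the buffer on (failed XDP_TX,
 * failed redirect, aborted or unknown action) returns the page to the
 * page_pool here, so the RX refill logic never leaks buffers to XDP.
 */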
static int netsec_process_rx(struct netsec_priv *priv, int budget)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct net_device *ndev = priv->ndev;
	struct netsec_rx_pkt_info rx_info;
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;

	xdp_prog = READ_ONCE(priv->xdp_prog);
	dma_dir = page_pool_get_dma_dir(dring->page_pool);

	while (done < budget) {
		u16 idx = dring->tail;
		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
		struct netsec_desc *desc = &dring->desc[idx];
		struct page *page = virt_to_page(desc->addr);
		u32 xdp_result = NETSEC_XDP_PASS;
		struct sk_buff *skb = NULL;
		u16 pkt_len, desc_len;
		dma_addr_t dma_handle;

		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
			/* reading the register clears the irq */
			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);

		/* This barrier is needed to keep us from reading
		 * any other fields out of the netsec_de until we have
		 * verified the descriptor has been written back
		 */
		pkt_len = de->buf_len_info >> 16;
		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
			NETSEC_RX_PKT_ERR_MASK;
		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
		if (rx_info.err_flag) {
			netif_err(priv, drv, priv->ndev,
				  "%s: rx fail err(%d)\n", __func__,
			ndev->stats.rx_dropped++;
			dring->tail = (dring->tail + 1) % DESC_NUM;
			/* reuse buffer page frag */
			netsec_rx_fill(priv, idx, 1);
		rx_info.rx_cksum_result =
			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;

		/* allocate a fresh buffer and map it to the hardware.
		 * This will eventually replace the old buffer in the hardware
		 */
		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);

		if (unlikely(!buf_addr))

		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
		prefetch(desc->addr);

		xdp.data_hard_start = desc->addr;
		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + pkt_len;
		xdp.rxq = &dring->xdp_rxq;

			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
		if (xdp_result != NETSEC_XDP_PASS) {
			xdp_act |= xdp_result;
			if (xdp_result == NETSEC_XDP_TX)

		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);

		if (unlikely(!skb)) {
			/* If skb fails recycle_direct will either unmap and
			 * free the page or refill the cache depending on the
			 * cache state. Since we paid the allocation cost if
			 * building an skb fails try to put the page into cache
			 */
			page_pool_put_page(dring->page_pool, page, pkt_len,
			netif_err(priv, drv, priv->ndev,
				  "rx failed to build skb\n");

		page_pool_release_page(dring->page_pool, page);

		skb_reserve(skb, xdp.data - xdp.data_hard_start);
		skb_put(skb, xdp.data_end - xdp.data);
		skb->protocol = eth_type_trans(skb, priv->ndev);

		if (priv->rx_cksum_offload_flag &&
		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += xdp.data_end - xdp.data;

		/* Update the descriptor with fresh buffers */
		desc->len = desc_len;
		desc->dma_addr = dma_handle;
		desc->addr = buf_addr;

		netsec_rx_fill(priv, idx, 1);
		dring->tail = (dring->tail + 1) % DESC_NUM;

	netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);
static int netsec_napi_poll(struct napi_struct *napi, int budget)
	struct netsec_priv *priv;

	priv = container_of(napi, struct netsec_priv, napi);

	netsec_process_tx(priv);
	done = netsec_process_rx(priv, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->reglock, flags);
		netsec_write(priv, NETSEC_REG_INTEN_SET,
			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
		spin_unlock_irqrestore(&priv->reglock, flags);
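/* RX and TX interrupts are re-armed only when the poll consumed less than its
 * budget and NAPI has completed; a full-budget poll leaves them masked so the
 * next softirq round keeps draining the rings without another hardware IRQ.
 */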
static int netsec_desc_used(struct netsec_desc_ring *dring)
	if (dring->head >= dring->tail)
		used = dring->head - dring->tail;
		used = dring->head + DESC_NUM - dring->tail;

static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

	/* keep tail from touching the queue */
	if (DESC_NUM - used < 2) {
		netif_stop_queue(priv->ndev);

		/* Make sure we read the updated value in case
		 * descriptors got freed
		 */
		used = netsec_desc_used(dring);
		if (DESC_NUM - used < 2)
			return NETDEV_TX_BUSY;

		netif_wake_queue(priv->ndev);
static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	u16 tso_seg_len = 0;

	spin_lock_bh(&dring->lock);
	filled = netsec_desc_used(dring);
	if (netsec_check_stop_tx(priv, filled)) {
		spin_unlock_bh(&dring->lock);
		net_warn_ratelimited("%s %s Tx queue full\n",
				     dev_name(priv->dev), ndev->name);
		return NETDEV_TX_BUSY;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_ctrl.cksum_offload_flag = true;

	if (skb_is_gso(skb))
		tso_seg_len = skb_shinfo(skb)->gso_size;

	if (tso_seg_len > 0) {
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr, 0);
			tcp_v6_gso_csum_prep(skb);

		tx_ctrl.tcp_seg_offload_flag = true;
		tx_ctrl.tcp_seg_len = tso_seg_len;

	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		spin_unlock_bh(&dring->lock);
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;

	tx_desc.addr = skb->data;
	tx_desc.len = skb_headlen(skb);
	tx_desc.buf_type = TYPE_NETSEC_SKB;

	skb_tx_timestamp(skb);
	netdev_sent_queue(priv->ndev, skb->len);

	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
	spin_unlock_bh(&dring->lock);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

	return NETDEV_TX_OK;
static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	struct netsec_desc *desc;

	if (!dring->vaddr || !dring->desc)

	for (idx = 0; idx < DESC_NUM; idx++) {
		desc = &dring->desc[idx];

		if (id == NETSEC_RING_RX) {
			struct page *page = virt_to_page(desc->addr);

			page_pool_put_full_page(dring->page_pool, page, false);
		} else if (id == NETSEC_RING_TX) {
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
			dev_kfree_skb(desc->skb);

	/* Rx is currently using page_pool */
	if (id == NETSEC_RING_RX) {
		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
			xdp_rxq_info_unreg(&dring->xdp_rxq);
		page_pool_destroy(dring->page_pool);

	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

	if (id == NETSEC_RING_TX)
		netdev_reset_queue(priv->ndev);
static void netsec_free_dring(struct netsec_priv *priv, int id)
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
			  dring->vaddr, dring->desc_dma);
	dring->vaddr = NULL;

static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
					  &dring->desc_dma, GFP_KERNEL);

	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);

	netsec_free_dring(priv, id);
static void netsec_setup_tx_dring(struct netsec_priv *priv)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_de *de;

		de = dring->vaddr + (DESC_SZ * i);
		/* de->attr is not going to be accessed by the NIC
		 * until netsec_set_tx_de() is called.
		 * No need for a dma_wmb() here
		 */
		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
static int netsec_setup_rx_dring(struct netsec_priv *priv)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = DESC_NUM,
		.nid = NUMA_NO_NODE,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = NETSEC_RXBUF_HEADROOM,
		.max_len = NETSEC_RX_BUF_SIZE,

	dring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(dring->page_pool)) {
		err = PTR_ERR(dring->page_pool);
		dring->page_pool = NULL;

	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);

	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_desc *desc = &dring->desc[i];
		dma_addr_t dma_handle;

		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);

		desc->dma_addr = dma_handle;

	netsec_rx_fill(priv, 0, DESC_NUM);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
					   u32 addr_h, u32 addr_l, u32 size)
	u64 base = (u64)addr_h << 32 | addr_l;
	void __iomem *ucode;

	ucode = ioremap(base, size * sizeof(u32));

	for (i = 0; i < size; i++)
		netsec_write(priv, reg, readl(ucode + i * 4));

static int netsec_netdev_load_microcode(struct netsec_priv *priv)
	u32 addr_h, addr_l, size;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
					      addr_h, addr_l, size);

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
					      addr_h, addr_l, size);

	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
					      addr_h, addr_l, size);
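/* The three microcode images (HM, MH and packet engine) are described by the
 * "eeprom" MMIO region; each image is streamed word by word into the
 * corresponding engine command buffer register before the core is released.
 */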
static int netsec_reset_hardware(struct netsec_priv *priv,
	/* stop DMA engines */
	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);
		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);

		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)

		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)

	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);

	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)

	/* set desc_start addr */
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));

	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));

	/* set normal tx dring ring config */
	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);
	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);

	err = netsec_netdev_load_microcode(priv);
		netif_err(priv, probe, priv->ndev,
			  "%s: failed to load microcode (%d)\n",

	/* start DMA engines */
	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);

	usleep_range(1000, 2000);

	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
		netif_err(priv, probe, priv->ndev,
			  "microengine start failed\n");
	netsec_write(priv, NETSEC_REG_TOP_STATUS,
		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);

	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
	if (priv->ndev->mtu > ETH_DATA_LEN)
		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;

	/* change to normal mode */
	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);

	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)

	/* clear any pending EMPTY/ERR irq status */
	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);

	/* Disable TX & RX intr */
	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
*priv
)
1478 struct phy_device
*phydev
= priv
->ndev
->phydev
;
1482 if (phydev
->speed
!= SPEED_1000
)
1483 value
= (NETSEC_GMAC_MCR_REG_CST
|
1484 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON
);
1486 if (netsec_mac_write(priv
, GMAC_REG_MCR
, value
))
1488 if (netsec_mac_write(priv
, GMAC_REG_BMR
,
1489 NETSEC_GMAC_BMR_REG_RESET
))
1492 /* Wait soft reset */
1493 usleep_range(1000, 5000);
1495 ret
= netsec_mac_read(priv
, GMAC_REG_BMR
, &value
);
1498 if (value
& NETSEC_GMAC_BMR_REG_SWR
)
1501 netsec_write(priv
, MAC_REG_DESC_SOFT_RST
, 1);
1502 if (netsec_wait_while_busy(priv
, MAC_REG_DESC_SOFT_RST
, 1))
1505 netsec_write(priv
, MAC_REG_DESC_INIT
, 1);
1506 if (netsec_wait_while_busy(priv
, MAC_REG_DESC_INIT
, 1))
1509 if (netsec_mac_write(priv
, GMAC_REG_BMR
,
1510 NETSEC_GMAC_BMR_REG_COMMON
))
1512 if (netsec_mac_write(priv
, GMAC_REG_RDLAR
,
1513 NETSEC_GMAC_RDLAR_REG_COMMON
))
1515 if (netsec_mac_write(priv
, GMAC_REG_TDLAR
,
1516 NETSEC_GMAC_TDLAR_REG_COMMON
))
1518 if (netsec_mac_write(priv
, GMAC_REG_MFFR
, 0x80000001))
1521 ret
= netsec_mac_update_to_phy_state(priv
);
1525 ret
= netsec_mac_read(priv
, GMAC_REG_OMR
, &value
);
1529 value
|= NETSEC_GMAC_OMR_REG_SR
;
1530 value
|= NETSEC_GMAC_OMR_REG_ST
;
1532 netsec_write(priv
, NETSEC_REG_NRM_RX_INTEN_CLR
, ~0);
1533 netsec_write(priv
, NETSEC_REG_NRM_TX_INTEN_CLR
, ~0);
1535 netsec_et_set_coalesce(priv
->ndev
, &priv
->et_coalesce
);
1537 if (netsec_mac_write(priv
, GMAC_REG_OMR
, value
))
static int netsec_stop_gmac(struct netsec_priv *priv)
	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	value &= ~NETSEC_GMAC_OMR_REG_SR;
	value &= ~NETSEC_GMAC_OMR_REG_ST;

	/* disable all interrupts */
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	return netsec_mac_write(priv, GMAC_REG_OMR, value);
static void netsec_phy_adjust_link(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	if (ndev->phydev->link)
		netsec_start_gmac(priv);
		netsec_stop_gmac(priv);

	phy_print_status(ndev->phydev);
static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
	struct netsec_priv *priv = dev_id;
	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
	unsigned long flags;

	/* Disable interrupts */
	if (status & NETSEC_IRQ_TX) {
		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
	if (status & NETSEC_IRQ_RX) {
		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);

	spin_lock_irqsave(&priv->reglock, flags);
	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
	spin_unlock_irqrestore(&priv->reglock, flags);

	napi_schedule(&priv->napi);
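/* The hard IRQ only acknowledges the per-direction status registers and masks
 * the RX/TX sources under reglock; all real work (completion, refill, XDP) is
 * deferred to netsec_napi_poll(), which re-enables the interrupts when done.
 */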
static int netsec_netdev_open(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	pm_runtime_get_sync(priv->dev);

	netsec_setup_tx_dring(priv);
	ret = netsec_setup_rx_dring(priv);
		netif_err(priv, probe, priv->ndev,
			  "%s: fail setup ring\n", __func__);

	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
			  IRQF_SHARED, "netsec", priv);
		netif_err(priv, drv, priv->ndev, "request_irq failed\n");

	if (dev_of_node(priv->dev)) {
		if (!of_phy_connect(priv->ndev, priv->phy_np,
				    netsec_phy_adjust_link, 0,
				    priv->phy_interface)) {
			netif_err(priv, link, priv->ndev, "missing PHY\n");
		ret = phy_connect_direct(priv->ndev, priv->phydev,
					 netsec_phy_adjust_link,
					 priv->phy_interface);
			netif_err(priv, link, priv->ndev,
				  "phy_connect_direct() failed (%d)\n", ret);

	phy_start(ndev->phydev);

	netsec_start_gmac(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	/* Enable TX+RX intr. */
	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);

	free_irq(priv->ndev->irq, priv);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
	pm_runtime_put_sync(priv->dev);
static int netsec_netdev_stop(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	netif_stop_queue(priv->ndev);

	napi_disable(&priv->napi);

	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
	netsec_stop_gmac(priv);

	free_irq(priv->ndev->irq, priv);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);

	ret = netsec_reset_hardware(priv, false);

	pm_runtime_put_sync(priv->dev);
static int netsec_netdev_init(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);

	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);

	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);

	/* set phy power down */
	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);

	ret = netsec_reset_hardware(priv, true);

	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);
static void netsec_netdev_uninit(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);
static int netsec_netdev_set_features(struct net_device *ndev,
				      netdev_features_t features)
	struct netsec_priv *priv = netdev_priv(ndev);

	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
static int netsec_xdp_xmit(struct net_device *ndev, int n,
			   struct xdp_frame **frames, u32 flags)
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))

	spin_lock(&tx_ring->lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = netsec_xdp_queue_one(priv, xdpf, true);
		if (err != NETSEC_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
		tx_ring->xdp_xmit++;

	spin_unlock(&tx_ring->lock);

	if (unlikely(flags & XDP_XMIT_FLUSH)) {
		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
		tx_ring->xdp_xmit = 0;
static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
			    struct netlink_ext_ack *extack)
	struct net_device *dev = priv->ndev;
	struct bpf_prog *old_prog;

	/* For now just support only the usual MTU sized frames */
	if (prog && dev->mtu > 1500) {
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");

	if (netif_running(dev))
		netsec_netdev_stop(dev);

	/* Detach old prog, if any */
	old_prog = xchg(&priv->xdp_prog, prog);
		bpf_prog_put(old_prog);

	if (netif_running(dev))
		netsec_netdev_open(dev);
static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
	struct netsec_priv *priv = netdev_priv(ndev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
static const struct net_device_ops netsec_netdev_ops = {
	.ndo_init		= netsec_netdev_init,
	.ndo_uninit		= netsec_netdev_uninit,
	.ndo_open		= netsec_netdev_open,
	.ndo_stop		= netsec_netdev_stop,
	.ndo_start_xmit		= netsec_netdev_start_xmit,
	.ndo_set_features	= netsec_netdev_set_features,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= phy_do_ioctl,
	.ndo_xdp_xmit		= netsec_xdp_xmit,
	.ndo_bpf		= netsec_xdp,
static int netsec_of_probe(struct platform_device *pdev,
			   struct netsec_priv *priv, u32 *phy_addr)
	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_np) {
		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");

	*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);

	priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "phy_ref_clk not found\n");
		return PTR_ERR(priv->clk);
	priv->freq = clk_get_rate(priv->clk);
static int netsec_acpi_probe(struct platform_device *pdev,
			     struct netsec_priv *priv, u32 *phy_addr)
	if (!IS_ENABLED(CONFIG_ACPI))

	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
			"missing required property 'phy-channel'\n");

	ret = device_property_read_u32(&pdev->dev,
				       "socionext,phy-clock-frequency",
			"missing required property 'socionext,phy-clock-frequency'\n");
static void netsec_unregister_mdio(struct netsec_priv *priv)
	struct phy_device *phydev = priv->phydev;

	if (!dev_of_node(priv->dev) && phydev) {
		phy_device_remove(phydev);
		phy_device_free(phydev);

	mdiobus_unregister(priv->mii_bus);
static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
	bus->name = "SNI NETSEC MDIO";
	bus->read = netsec_phy_read;
	bus->write = netsec_phy_write;
	bus->parent = priv->dev;
	priv->mii_bus = bus;

	if (dev_of_node(priv->dev)) {
		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);

		mdio_node = of_get_child_by_name(parent, "mdio");
			/* older f/w doesn't populate the mdio subnode,
			 * allow relaxed upgrade of f/w in due time.
			 */
			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");

		ret = of_mdiobus_register(bus, parent);
		of_node_put(mdio_node);
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);

		/* Mask out all PHYs from auto probing. */
		ret = mdiobus_register(bus);
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);

		priv->phydev = get_phy_device(bus, phy_addr, false);
		if (IS_ERR(priv->phydev)) {
			ret = PTR_ERR(priv->phydev);
			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
			priv->phydev = NULL;

		ret = phy_device_register(priv->phydev);
			mdiobus_unregister(bus);
				"phy_device_register err(%d)\n", ret);
static int netsec_probe(struct platform_device *pdev)
	struct resource *mmio_res, *eeprom_res, *irq_res;
	u8 *mac, macbuf[ETH_ALEN];
	struct netsec_priv *priv;
	u32 hw_ver, phy_addr = 0;
	struct net_device *ndev;

	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		dev_err(&pdev->dev, "No MMIO resource found.\n");

	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		dev_info(&pdev->dev, "No EEPROM resource found.\n");

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		dev_err(&pdev->dev, "No IRQ resource found.\n");

	ndev = alloc_etherdev(sizeof(*priv));

	priv = netdev_priv(ndev);

	spin_lock_init(&priv->reglock);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, priv);
	ndev->irq = irq_res->start;
	priv->dev = &pdev->dev;

	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   NETIF_MSG_LINK | NETIF_MSG_PROBE;

	priv->phy_interface = device_get_phy_mode(&pdev->dev);
	if ((int)priv->phy_interface < 0) {
		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");

	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    resource_size(mmio_res));
	if (!priv->ioaddr) {
		dev_err(&pdev->dev, "devm_ioremap() failed\n");

	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
					 resource_size(eeprom_res));
	if (!priv->eeprom_base) {
		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");

	mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
		ether_addr_copy(ndev->dev_addr, mac);

	if (priv->eeprom_base &&
	    (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
		void __iomem *macp = priv->eeprom_base +
					NETSEC_EEPROM_MAC_ADDRESS;

		ndev->dev_addr[0] = readb(macp + 3);
		ndev->dev_addr[1] = readb(macp + 2);
		ndev->dev_addr[2] = readb(macp + 1);
		ndev->dev_addr[3] = readb(macp + 0);
		ndev->dev_addr[4] = readb(macp + 7);
		ndev->dev_addr[5] = readb(macp + 6);
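/* The EEPROM stores the station address byte-swapped within each 32-bit word,
 * hence the bytes are read back in 3,2,1,0,7,6 order to recover the usual MAC
 * byte sequence.
 */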
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);

	if (dev_of_node(&pdev->dev))
		ret = netsec_of_probe(pdev, priv, &phy_addr);
		ret = netsec_acpi_probe(pdev, priv, &phy_addr);

	priv->phy_addr = phy_addr;

		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");

	/* default for throughput */
	priv->et_coalesce.rx_coalesce_usecs = 500;
	priv->et_coalesce.rx_max_coalesced_frames = 8;
	priv->et_coalesce.tx_coalesce_usecs = 500;
	priv->et_coalesce.tx_max_coalesced_frames = 8;

	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
		ndev->max_mtu = ETH_DATA_LEN;

	/* runtime_pm coverage just for probe, open/close also cover it */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
	/* this driver only supports F_TAIKI style NETSEC */
	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {

	dev_info(&pdev->dev, "hardware revision %d.%d\n",
		 hw_ver >> 16, hw_ver & 0xffff);

	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);

	ndev->netdev_ops = &netsec_netdev_ops;
	ndev->ethtool_ops = &netsec_ethtool_ops;

	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
			  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features = ndev->features;

	priv->rx_cksum_offload_flag = true;

	ret = netsec_register_mdio(priv, phy_addr);

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "Failed to set DMA mask\n");

	ret = register_netdev(ndev);
		netif_err(priv, probe, ndev, "register_netdev() failed\n");

	pm_runtime_put_sync(&pdev->dev);

	netsec_unregister_mdio(priv);

	netif_napi_del(&priv->napi);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	dev_err(&pdev->dev, "init failed\n");
static int netsec_remove(struct platform_device *pdev)
	struct netsec_priv *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);

	netsec_unregister_mdio(priv);

	netif_napi_del(&priv->napi);

	pm_runtime_disable(&pdev->dev);
	free_netdev(priv->ndev);
static int netsec_runtime_suspend(struct device *dev)
	struct netsec_priv *priv = dev_get_drvdata(dev);

	netsec_write(priv, NETSEC_REG_CLK_EN, 0);

	clk_disable_unprepare(priv->clk);

static int netsec_runtime_resume(struct device *dev)
	struct netsec_priv *priv = dev_get_drvdata(dev);

	clk_prepare_enable(priv->clk);

	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
					      NETSEC_CLK_EN_REG_DOM_C |
					      NETSEC_CLK_EN_REG_DOM_G);
static const struct dev_pm_ops netsec_pm_ops = {
	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)

static const struct of_device_id netsec_dt_ids[] = {
	{ .compatible = "socionext,synquacer-netsec" },
MODULE_DEVICE_TABLE(of, netsec_dt_ids);

static const struct acpi_device_id netsec_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);

static struct platform_driver netsec_driver = {
	.probe	= netsec_probe,
	.remove	= netsec_remove,
		.pm		= &netsec_pm_ops,
		.of_match_table	= netsec_dt_ids,
		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
module_platform_driver(netsec_driver);

MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("NETSEC Ethernet driver");
MODULE_LICENSE("GPL");