// SPDX-License-Identifier: GPL-2.0+

#include <linux/types.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <net/tcp.h>
#include <net/page_pool.h>
#include <net/ip6_checksum.h>
#define NETSEC_REG_SOFT_RST			0x104
#define NETSEC_REG_COM_INIT			0x120

#define NETSEC_REG_TOP_STATUS			0x200
#define NETSEC_IRQ_RX				BIT(1)
#define NETSEC_IRQ_TX				BIT(0)

#define NETSEC_REG_TOP_INTEN			0x204
#define NETSEC_REG_INTEN_SET			0x234
#define NETSEC_REG_INTEN_CLR			0x238

#define NETSEC_REG_NRM_TX_STATUS		0x400
#define NETSEC_REG_NRM_TX_INTEN			0x404
#define NETSEC_REG_NRM_TX_INTEN_SET		0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR		0x42c
#define NRM_TX_ST_NTOWNR			BIT(17)
#define NRM_TX_ST_TR_ERR			BIT(16)
#define NRM_TX_ST_TXDONE			BIT(15)
#define NRM_TX_ST_TMREXP			BIT(14)

#define NETSEC_REG_NRM_RX_STATUS		0x440
#define NETSEC_REG_NRM_RX_INTEN			0x444
#define NETSEC_REG_NRM_RX_INTEN_SET		0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR		0x46c
#define NRM_RX_ST_RC_ERR			BIT(16)
#define NRM_RX_ST_PKTCNT			BIT(15)
#define NRM_RX_ST_TMREXP			BIT(14)

#define NETSEC_REG_PKT_CMD_BUF			0xd0
#define NETSEC_REG_CLK_EN			0x100
#define NETSEC_REG_PKT_CTRL			0x140

#define NETSEC_REG_DMA_TMR_CTRL			0x20c
#define NETSEC_REG_F_TAIKI_MC_VER		0x22c
#define NETSEC_REG_F_TAIKI_VER			0x230
#define NETSEC_REG_DMA_HM_CTRL			0x214
#define NETSEC_REG_DMA_MH_CTRL			0x220
#define NETSEC_REG_ADDR_DIS_CORE		0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF		0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF		0x21c

#define NETSEC_REG_NRM_TX_PKTCNT		0x410
#define NETSEC_REG_NRM_TX_DONE_PKTCNT		0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT	0x418
#define NETSEC_REG_NRM_TX_TMR			0x41c
#define NETSEC_REG_NRM_RX_PKTCNT		0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT		0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR		0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR		0x460
#define NETSEC_REG_NRM_RX_TMR			0x45c

#define NETSEC_REG_NRM_TX_DESC_START_UP		0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW		0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP		0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW		0x448

#define NETSEC_REG_NRM_TX_CONFIG		0x430
#define NETSEC_REG_NRM_RX_CONFIG		0x470

#define MAC_REG_STATUS				0x1024
#define MAC_REG_DATA				0x11c0
#define MAC_REG_CMD				0x11c4
#define MAC_REG_FLOW_TH				0x11cc
#define MAC_REG_INTF_SEL			0x11d4
#define MAC_REG_DESC_INIT			0x11fc
#define MAC_REG_DESC_SOFT_RST			0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS	0x500

#define GMAC_REG_MCR				0x0000
#define GMAC_REG_MFFR				0x0004
#define GMAC_REG_GAR				0x0010
#define GMAC_REG_GDR				0x0014
#define GMAC_REG_FCR				0x0018
#define GMAC_REG_BMR				0x1000
#define GMAC_REG_RDLAR				0x100c
#define GMAC_REG_TDLAR				0x1010
#define GMAC_REG_OMR				0x1018

#define MHZ(n)					((n) * 1000 * 1000)

#define NETSEC_TX_SHIFT_OWN_FIELD		31
#define NETSEC_TX_SHIFT_LD_FIELD		30
#define NETSEC_TX_SHIFT_DRID_FIELD		24
#define NETSEC_TX_SHIFT_PT_FIELD		21
#define NETSEC_TX_SHIFT_TDRID_FIELD		16
#define NETSEC_TX_SHIFT_CC_FIELD		15
#define NETSEC_TX_SHIFT_FS_FIELD		9
#define NETSEC_TX_LAST				8
#define NETSEC_TX_SHIFT_CO			7
#define NETSEC_TX_SHIFT_SO			6
#define NETSEC_TX_SHIFT_TRS_FIELD		4

#define NETSEC_RX_PKT_OWN_FIELD			31
#define NETSEC_RX_PKT_LD_FIELD			30
#define NETSEC_RX_PKT_SDRID_FIELD		24
#define NETSEC_RX_PKT_FR_FIELD			23
#define NETSEC_RX_PKT_ER_FIELD			21
#define NETSEC_RX_PKT_ERR_FIELD			16
#define NETSEC_RX_PKT_TDRID_FIELD		12
#define NETSEC_RX_PKT_FS_FIELD			9
#define NETSEC_RX_PKT_LS_FIELD			8
#define NETSEC_RX_PKT_CO_FIELD			6

#define NETSEC_RX_PKT_ERR_MASK			3

#define NETSEC_MAX_TX_PKT_LEN			1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN		9018

#define NETSEC_RING_GMAC			15
#define NETSEC_RING_MAX				2

#define NETSEC_TCP_SEG_LEN_MAX			1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX		8960

#define NETSEC_RX_CKSUM_NOTAVAIL		0
#define NETSEC_RX_CKSUM_OK			1
#define NETSEC_RX_CKSUM_NG			2

#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END	BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE		BIT(4)

#define NETSEC_MODE_TRANS_COMP_IRQ_N2T		BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N		BIT(19)

#define NETSEC_INT_PKTCNT_MAX			2047

#define NETSEC_FLOW_START_TH_MAX		95
#define NETSEC_FLOW_STOP_TH_MAX			95
#define NETSEC_FLOW_PAUSE_TIME_MIN		5

#define NETSEC_CLK_EN_REG_DOM_ALL		0x3f

#define NETSEC_PKT_CTRL_REG_MODE_NRM		BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO		BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER	BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE	BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER		BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH	BIT(0)

#define NETSEC_CLK_EN_REG_DOM_G			BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C			BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D			BIT(0)

#define NETSEC_COM_INIT_REG_DB			BIT(2)
#define NETSEC_COM_INIT_REG_CLS			BIT(1)
#define NETSEC_COM_INIT_REG_ALL			(NETSEC_COM_INIT_REG_CLS | \
						 NETSEC_COM_INIT_REG_DB)

#define NETSEC_SOFT_RST_REG_RESET		0
#define NETSEC_SOFT_RST_REG_RUN			BIT(31)

#define NETSEC_DMA_CTRL_REG_STOP		1
#define MH_CTRL__MODE_TRANS			BIT(20)

#define NETSEC_GMAC_CMD_ST_READ			0
#define NETSEC_GMAC_CMD_ST_WRITE		BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY			BIT(31)

#define NETSEC_GMAC_BMR_REG_COMMON		0x00412080
#define NETSEC_GMAC_BMR_REG_RESET		0x00020181
#define NETSEC_GMAC_BMR_REG_SWR			0x00000001

#define NETSEC_GMAC_OMR_REG_ST			BIT(13)
#define NETSEC_GMAC_OMR_REG_SR			BIT(1)

#define NETSEC_GMAC_MCR_REG_IBN			BIT(30)
#define NETSEC_GMAC_MCR_REG_CST			BIT(25)
#define NETSEC_GMAC_MCR_REG_JE			BIT(20)
#define NETSEC_MCR_PS				BIT(15)
#define NETSEC_GMAC_MCR_REG_FES			BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON	0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON	0x0001a00c

#define NETSEC_FCR_RFE				BIT(2)
#define NETSEC_FCR_TFE				BIT(1)

#define NETSEC_GMAC_GAR_REG_GW			BIT(1)
#define NETSEC_GMAC_GAR_REG_GB			BIT(0)

#define NETSEC_GMAC_GAR_REG_SHIFT_PA		11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR		6
#define GMAC_REG_SHIFT_CR_GAR			2

#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ	2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ	3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ	0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ	1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ	4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ	5

#define NETSEC_GMAC_RDLAR_REG_COMMON		0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON		0x1c000

#define NETSEC_REG_NETSEC_VER_F_TAIKI		0x50000

#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP	BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST	BIT(30)
#define NETSEC_REG_DESC_TMR_MODE		4
#define NETSEC_REG_DESC_ENDIAN			0

#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST	1
#define NETSEC_MAC_DESC_INIT_REG_INIT		1

#define NETSEC_EEPROM_MAC_ADDRESS		0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H		0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L		0x0C
#define NETSEC_EEPROM_HM_ME_SIZE		0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H		0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L		0x18
#define NETSEC_EEPROM_MH_ME_SIZE		0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS		0x20
#define NETSEC_EEPROM_PKT_ME_SIZE		0x24

#define NETSEC_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)
#define NETSEC_RXBUF_HEADROOM	(max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
				 NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA	(NETSEC_RXBUF_HEADROOM + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define NETSEC_RX_BUF_SIZE	(PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
#define DESC_SZ	sizeof(struct netsec_de)

#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)	((x) & 0xffff0000)

#define NETSEC_XDP_PASS		0
#define NETSEC_XDP_CONSUMED	BIT(0)
#define NETSEC_XDP_TX		BIT(1)
#define NETSEC_XDP_REDIR	BIT(2)
	struct xdp_frame *xdpf;
struct netsec_desc_ring {
	dma_addr_t desc_dma;
	struct netsec_desc *desc;
	void *vaddr;
	u16 head, tail;
	u16 xdp_xmit; /* netsec_xdp_xmit packets */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	spinlock_t lock; /* XDP tx queue locking */
};
struct netsec_priv {
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
	struct bpf_prog *xdp_prog;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
	struct net_device *ndev;
	struct device_node *phy_np;
	struct phy_device *phydev;
	struct mii_bus *mii_bus;
	void __iomem *ioaddr;
	void __iomem *eeprom_base;
	struct device *dev;
	struct clk *clk;
	u32 msg_enable;
	u32 freq;
	u32 phy_addr;
	bool rx_cksum_offload_flag;
};
struct netsec_de { /* Netsec Descriptor layout */
	u32 attr;
	u32 data_buf_addr_up;
	u32 data_buf_addr_lw;
	u32 buf_len_info;
};

struct netsec_tx_pkt_ctrl {
	u16 tcp_seg_len;
	bool tcp_seg_offload_flag;
	bool cksum_offload_flag;
};
struct netsec_rx_pkt_info {
	int rx_cksum_result;
	int err_code;
	bool err_flag;
};
static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
	writel(val, priv->ioaddr + reg_addr);

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
	return readl(priv->ioaddr + reg_addr);

/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC		1000
#define TIMEOUT_SECONDARY_MS_MAC	100
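/* netsec_clk_type() below maps the PHY reference clock rate to the GAR CR
 * field (the MDC clock divisor code) defined by the
 * NETSEC_GMAC_GAR_REG_CR_* values above; the result is shifted by
 * GMAC_REG_SHIFT_CR_GAR when building GAR accesses.
 */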
static u32 netsec_clk_type(u32 freq)
	if (freq < MHZ(35))
		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
	if (freq < MHZ(60))
		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
	if (freq < MHZ(100))
		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
	if (freq < MHZ(150))
		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
	if (freq < MHZ(250))
		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;

	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
	u32 timeout = TIMEOUT_SPINS_MAC;

	while (--timeout && netsec_read(priv, addr) & mask)

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	while (--timeout && netsec_read(priv, addr) & mask)
		usleep_range(1000, 2000);

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
	netsec_write(priv, MAC_REG_DATA, value);
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
	return netsec_wait_while_busy(priv,
				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);

static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
	ret = netsec_wait_while_busy(priv,
				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);

	*read = netsec_read(priv, MAC_REG_DATA);

static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
	u32 timeout = TIMEOUT_SPINS_MAC;

	ret = netsec_mac_read(priv, addr, &data);
	} while (--timeout && (data & mask));

	timeout = TIMEOUT_SECONDARY_MS_MAC;
		usleep_range(1000, 2000);
		ret = netsec_mac_read(priv, addr, &data);
	} while (--timeout && (data & mask));

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
	struct phy_device *phydev = priv->ndev->phydev;

	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

	if (phydev->speed != SPEED_1000)
		value |= NETSEC_MCR_PS;

	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		value |= NETSEC_GMAC_MCR_REG_FES;

	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		value |= NETSEC_GMAC_MCR_REG_IBN;

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
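/* netsec_phy_read() is forward-declared here because netsec_phy_write()
 * issues a dummy PHY read after each write (see the RTL8211E note below).
 */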
static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);

static int netsec_phy_write(struct mii_bus *bus,
			    int phy_addr, int reg, u16 val)
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GDR, val))

	if (netsec_mac_write(priv, GMAC_REG_GAR,
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))

	status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					    NETSEC_GMAC_GAR_REG_GB);

	/* Developerbox implements RTL8211E PHY and there is
	 * a compatibility problem with F_GMAC4.
	 * RTL8211E expects MDC clock must be kept toggling for several
	 * clock cycle with MDIO high before entering the IDLE state.
	 * To meet this requirement, netsec driver needs to issue dummy
	 * read(e.g. read PHYID1(offset 0x2) register) right after write.
	 */
	netsec_phy_read(bus, phy_addr, MII_PHYSID1);

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))

	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					 NETSEC_GMAC_GAR_REG_GB);

	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
				  struct ethtool_drvinfo *info)
	strlcpy(info->driver, "netsec", sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));

static int netsec_et_get_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
	struct netsec_priv *priv = netdev_priv(net_device);

	*et_coalesce = priv->et_coalesce;

static int netsec_et_set_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
	struct netsec_priv *priv = netdev_priv(net_device);

	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
		     priv->et_coalesce.tx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
		     priv->et_coalesce.tx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
		     priv->et_coalesce.rx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
		     priv->et_coalesce.rx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);

static u32 netsec_et_get_msglevel(struct net_device *dev)
	struct netsec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
	struct netsec_priv *priv = netdev_priv(dev);

	priv->msg_enable = datum;

static const struct ethtool_ops netsec_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo		= netsec_et_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= netsec_et_get_coalesce,
	.set_coalesce		= netsec_et_set_coalesce,
	.get_msglevel		= netsec_et_get_msglevel,
	.set_msglevel		= netsec_et_set_msglevel,
};
/************* NETDEV_OPS FOLLOW *************/

static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
			     const struct netsec_desc *desc)
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
		   (1 << NETSEC_RX_PKT_FS_FIELD) |
		   (1 << NETSEC_RX_PKT_LS_FIELD);

	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;

	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
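/* Reclaim Tx descriptors the hardware has finished with: the loop below
 * only processes entries whose OWN field in de->attr has been cleared by
 * the engine, and reading NRM_TX_DONE_PKTCNT acknowledges the Tx-done
 * interrupt.
 */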
static bool netsec_clean_tx_dring(struct netsec_priv *priv)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct xdp_frame_bulk bq;
	struct netsec_de *entry;
	int tail = dring->tail;

	spin_lock(&dring->lock);

	xdp_frame_bulk_init(&bq);
	entry = dring->vaddr + DESC_SZ * tail;

	rcu_read_lock(); /* need for xdp_return_frame_bulk */

	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
		struct netsec_desc *desc;

		desc = &dring->desc[tail];
		eop = (entry->attr >> NETSEC_TX_LAST) & 1;

		/* if buf_type is either TYPE_NETSEC_SKB or
		 * TYPE_NETSEC_XDP_NDO we mapped it
		 */
		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,

		if (desc->buf_type == TYPE_NETSEC_SKB) {
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);
			bytes += desc->xdpf->len;
			if (desc->buf_type == TYPE_NETSEC_XDP_TX)
				xdp_return_frame_rx_napi(desc->xdpf);
				xdp_return_frame_bulk(desc->xdpf, &bq);

		/* clean up so netsec_uninit_pkt_dring() won't free the skb
		 */
		*desc = (struct netsec_desc){};

		/* entry->attr is not going to be accessed by the NIC until
		 * netsec_set_tx_de() is called. No need for a dma_wmb() here
		 */
		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;

		entry = dring->vaddr + DESC_SZ * tail;

	xdp_flush_frame_bulk(&bq);

	spin_unlock(&dring->lock);

	/* reading the register clears the irq */
	netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

	priv->ndev->stats.tx_packets += cnt;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, cnt, bytes);

static void netsec_process_tx(struct netsec_priv *priv)
	struct net_device *ndev = priv->ndev;

	cleaned = netsec_clean_tx_dring(priv);

	if (cleaned && netif_queue_stopped(ndev)) {
		/* Make sure we update the value, anyone stopping the queue
		 * after this will read the proper consumer idx
		 */
		netif_wake_queue(ndev);

static void *netsec_alloc_rx_data(struct netsec_priv *priv,
				  dma_addr_t *dma_handle, u16 *desc_len)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];

	page = page_pool_dev_alloc_pages(dring->page_pool);

	/* We allocate the same buffer length for XDP and non-XDP cases.
	 * page_pool API will map the whole page, skip what's needed for
	 * network payloads and/or XDP
	 */
	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
	/* Make sure the incoming payload fits in the page for XDP and non-XDP
	 * cases and reserve enough space for headroom + skb_shared_info
	 */
	*desc_len = NETSEC_RX_BUF_SIZE;

	return page_address(page);

static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];

		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);

static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);

static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
	if (xdp_res & NETSEC_XDP_REDIR)

	if (xdp_res & NETSEC_XDP_TX)
		netsec_xdp_ring_tx_db(priv, pkts);
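/* netsec_set_tx_de() builds one Tx descriptor: the attr word packs the
 * ownership, first/last-segment and checksum/TSO offload flags plus the
 * target GMAC ring id using the NETSEC_TX_SHIFT_* fields defined above.
 */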
static void netsec_set_tx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring,
			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
			     const struct netsec_desc *desc, void *buf)
	int idx = dring->head;
	struct netsec_de *de;

	de = dring->vaddr + (DESC_SZ * idx);

	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
	       (1 << NETSEC_TX_LAST) |
	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;

	dring->desc[idx] = *desc;
	if (desc->buf_type == TYPE_NETSEC_SKB)
		dring->desc[idx].skb = buf;
	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
		dring->desc[idx].xdpf = buf;

	/* move head ahead */
	dring->head = (dring->head + 1) % DESC_NUM;

/* The current driver only supports 1 Txq, this should run under spin_lock() */
static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
				struct xdp_frame *xdpf, bool is_ndo)
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct page *page = virt_to_page(xdpf->data);
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	dma_addr_t dma_handle;

	if (tx_ring->head >= tx_ring->tail)
		filled = tx_ring->head - tx_ring->tail;
		filled = tx_ring->head + DESC_NUM - tx_ring->tail;

	if (DESC_NUM - filled <= 1)
		return NETSEC_XDP_CONSUMED;

		/* this is for ndo_xdp_xmit, the buffer needs mapping before
		 * sending
		 */
		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
		if (dma_mapping_error(priv->dev, dma_handle))
			return NETSEC_XDP_CONSUMED;
		tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;

		/* This is the device Rx buffer from page_pool. No need to remap
		 * just sync and send it
		 */
		struct netsec_desc_ring *rx_ring =
			&priv->desc_ring[NETSEC_RING_RX];
		enum dma_data_direction dma_dir =
			page_pool_get_dma_dir(rx_ring->page_pool);

		dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
		tx_desc.buf_type = TYPE_NETSEC_XDP_TX;

	tx_desc.dma_addr = dma_handle;
	tx_desc.addr = xdpf->data;
	tx_desc.len = xdpf->len;

	netdev_sent_queue(priv->ndev, xdpf->len);
	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);

	return NETSEC_XDP_TX;

static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

		return NETSEC_XDP_CONSUMED;

	spin_lock(&tx_ring->lock);
	ret = netsec_xdp_queue_one(priv, xdpf, false);
	spin_unlock(&tx_ring->lock);
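/* Run the attached XDP program on one Rx buffer and translate its action
 * into the NETSEC_XDP_* verdict bits that netsec_process_rx() accumulates
 * and netsec_finalize_xdp_rx() acts on.
 */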
static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
			  struct xdp_buff *xdp)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	unsigned int sync, len = xdp->data_end - xdp->data;
	u32 ret = NETSEC_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);

	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
	sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
	sync = max(sync, len);

		ret = NETSEC_XDP_PASS;

		ret = netsec_xdp_xmit_back(priv, xdp);
		if (ret != NETSEC_XDP_TX) {
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(dring->page_pool, page, sync, true);

		err = xdp_do_redirect(priv->ndev, xdp, prog);
			ret = NETSEC_XDP_REDIR;
			ret = NETSEC_XDP_CONSUMED;
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(dring->page_pool, page, sync, true);

		bpf_warn_invalid_xdp_action(act);
		trace_xdp_exception(priv->ndev, prog, act);
		fallthrough;	/* handle aborts by dropping packet */
		ret = NETSEC_XDP_CONSUMED;
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(dring->page_pool, page, sync, true);

static int netsec_process_rx(struct netsec_priv *priv, int budget)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct net_device *ndev = priv->ndev;
	struct netsec_rx_pkt_info rx_info;
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;

	xdp.rxq = &dring->xdp_rxq;
	xdp.frame_sz = PAGE_SIZE;

	xdp_prog = READ_ONCE(priv->xdp_prog);
	dma_dir = page_pool_get_dma_dir(dring->page_pool);

	while (done < budget) {
		u16 idx = dring->tail;
		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
		struct netsec_desc *desc = &dring->desc[idx];
		struct page *page = virt_to_page(desc->addr);
		u32 xdp_result = NETSEC_XDP_PASS;
		struct sk_buff *skb = NULL;
		u16 pkt_len, desc_len;
		dma_addr_t dma_handle;

		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
			/* reading the register clears the irq */
			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);

		/* This barrier is needed to keep us from reading
		 * any other fields out of the netsec_de until we have
		 * verified the descriptor has been written back
		 */

		pkt_len = de->buf_len_info >> 16;
		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
			NETSEC_RX_PKT_ERR_MASK;
		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
		if (rx_info.err_flag) {
			netif_err(priv, drv, priv->ndev,
				  "%s: rx fail err(%d)\n", __func__,
			ndev->stats.rx_dropped++;
			dring->tail = (dring->tail + 1) % DESC_NUM;
			/* reuse buffer page frag */
			netsec_rx_fill(priv, idx, 1);

		rx_info.rx_cksum_result =
			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;

		/* allocate a fresh buffer and map it to the hardware.
		 * This will eventually replace the old buffer in the hardware
		 */
		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);

		if (unlikely(!buf_addr))

		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
		prefetch(desc->addr);

		xdp.data_hard_start = desc->addr;
		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + pkt_len;

			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
			if (xdp_result != NETSEC_XDP_PASS) {
				xdp_act |= xdp_result;
				if (xdp_result == NETSEC_XDP_TX)

		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);

		if (unlikely(!skb)) {
			/* If skb fails recycle_direct will either unmap and
			 * free the page or refill the cache depending on the
			 * cache state. Since we paid the allocation cost if
			 * building an skb fails try to put the page into cache
			 */
			page_pool_put_page(dring->page_pool, page, pkt_len,
			netif_err(priv, drv, priv->ndev,
				  "rx failed to build skb\n");

		page_pool_release_page(dring->page_pool, page);

		skb_reserve(skb, xdp.data - xdp.data_hard_start);
		skb_put(skb, xdp.data_end - xdp.data);
		skb->protocol = eth_type_trans(skb, priv->ndev);

		if (priv->rx_cksum_offload_flag &&
		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&priv->napi, skb);
		if (skb || xdp_result) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += xdp.data_end - xdp.data;

		/* Update the descriptor with fresh buffers */
		desc->len = desc_len;
		desc->dma_addr = dma_handle;
		desc->addr = buf_addr;

		netsec_rx_fill(priv, idx, 1);
		dring->tail = (dring->tail + 1) % DESC_NUM;

	netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);
static int netsec_napi_poll(struct napi_struct *napi, int budget)
	struct netsec_priv *priv;

	priv = container_of(napi, struct netsec_priv, napi);

	netsec_process_tx(priv);
	done = netsec_process_rx(priv, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->reglock, flags);
		netsec_write(priv, NETSEC_REG_INTEN_SET,
			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
		spin_unlock_irqrestore(&priv->reglock, flags);
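/* Ring occupancy helper: head and tail wrap modulo DESC_NUM, so the
 * number of in-use descriptors depends on whether head has wrapped past
 * tail.
 */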
static int netsec_desc_used(struct netsec_desc_ring *dring)
	if (dring->head >= dring->tail)
		used = dring->head - dring->tail;
		used = dring->head + DESC_NUM - dring->tail;

static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

	/* keep tail from touching the queue */
	if (DESC_NUM - used < 2) {
		netif_stop_queue(priv->ndev);

		/* Make sure we read the updated value in case
		 * descriptors got freed
		 */
		used = netsec_desc_used(dring);
		if (DESC_NUM - used < 2)
			return NETDEV_TX_BUSY;

		netif_wake_queue(priv->ndev);

static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	u16 tso_seg_len = 0;

	spin_lock_bh(&dring->lock);
	filled = netsec_desc_used(dring);
	if (netsec_check_stop_tx(priv, filled)) {
		spin_unlock_bh(&dring->lock);
		net_warn_ratelimited("%s %s Tx queue full\n",
				     dev_name(priv->dev), ndev->name);
		return NETDEV_TX_BUSY;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_ctrl.cksum_offload_flag = true;

	if (skb_is_gso(skb))
		tso_seg_len = skb_shinfo(skb)->gso_size;

	if (tso_seg_len > 0) {
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr, 0);
			tcp_v6_gso_csum_prep(skb);

		tx_ctrl.tcp_seg_offload_flag = true;
		tx_ctrl.tcp_seg_len = tso_seg_len;

	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		spin_unlock_bh(&dring->lock);
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	tx_desc.addr = skb->data;
	tx_desc.len = skb_headlen(skb);
	tx_desc.buf_type = TYPE_NETSEC_SKB;

	skb_tx_timestamp(skb);
	netdev_sent_queue(priv->ndev, skb->len);

	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
	spin_unlock_bh(&dring->lock);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

	return NETDEV_TX_OK;
static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	struct netsec_desc *desc;

	if (!dring->vaddr || !dring->desc)

	for (idx = 0; idx < DESC_NUM; idx++) {
		desc = &dring->desc[idx];

		if (id == NETSEC_RING_RX) {
			struct page *page = virt_to_page(desc->addr);

			page_pool_put_full_page(dring->page_pool, page, false);
		} else if (id == NETSEC_RING_TX) {
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
			dev_kfree_skb(desc->skb);

	/* Rx is currently using page_pool */
	if (id == NETSEC_RING_RX) {
		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
			xdp_rxq_info_unreg(&dring->xdp_rxq);
		page_pool_destroy(dring->page_pool);

	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

	if (id == NETSEC_RING_TX)
		netdev_reset_queue(priv->ndev);

static void netsec_free_dring(struct netsec_priv *priv, int id)
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
			  dring->vaddr, dring->desc_dma);
	dring->vaddr = NULL;

static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
					  &dring->desc_dma, GFP_KERNEL);

	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);

	netsec_free_dring(priv, id);

static void netsec_setup_tx_dring(struct netsec_priv *priv)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_de *de;

		de = dring->vaddr + (DESC_SZ * i);
		/* de->attr is not going to be accessed by the NIC
		 * until netsec_set_tx_de() is called.
		 * No need for a dma_wmb() here
		 */
		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;

static int netsec_setup_rx_dring(struct netsec_priv *priv)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = DESC_NUM,
		.nid = NUMA_NO_NODE,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = NETSEC_RXBUF_HEADROOM,
		.max_len = NETSEC_RX_BUF_SIZE,
	};

	dring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(dring->page_pool)) {
		err = PTR_ERR(dring->page_pool);
		dring->page_pool = NULL;

	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0, priv->napi.napi_id);

	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_desc *desc = &dring->desc[i];
		dma_addr_t dma_handle;

		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);

		desc->dma_addr = dma_handle;

	netsec_rx_fill(priv, 0, DESC_NUM);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
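/* Engine microcode is described by address/size tuples stored in the
 * attached EEPROM; each region is mapped and streamed word by word into
 * the HM, MH or packet-engine command buffer register.
 */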
static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
					   u32 addr_h, u32 addr_l, u32 size)
	u64 base = (u64)addr_h << 32 | addr_l;
	void __iomem *ucode;

	ucode = ioremap(base, size * sizeof(u32));

	for (i = 0; i < size; i++)
		netsec_write(priv, reg, readl(ucode + i * 4));

static int netsec_netdev_load_microcode(struct netsec_priv *priv)
	u32 addr_h, addr_l, size;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
					      addr_h, addr_l, size);

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
					      addr_h, addr_l, size);

	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
					      addr_h, addr_l, size);

static int netsec_reset_hardware(struct netsec_priv *priv,
	/* stop DMA engines */
	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);
		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);

		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)

		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)

	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);

	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)

	/* set desc_start addr */
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));

	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));

	/* set normal tx dring ring config */
	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);
	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);

		err = netsec_netdev_load_microcode(priv);
			netif_err(priv, probe, priv->ndev,
				  "%s: failed to load microcode (%d)\n",

	/* start DMA engines */
	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);

	usleep_range(1000, 2000);

	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
		netif_err(priv, probe, priv->ndev,
			  "microengine start failed\n");
	netsec_write(priv, NETSEC_REG_TOP_STATUS,
		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);

	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
	if (priv->ndev->mtu > ETH_DATA_LEN)
		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;

	/* change to normal mode */
	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);

	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)

	/* clear any pending EMPTY/ERR irq status */
	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);

	/* Disable TX & RX intr */
	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
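/* GMAC bring-up: soft-reset the MAC via BMR, re-initialise its internal
 * descriptors, program the common BMR/RDLAR/TDLAR values, sync MCR with
 * the current PHY state and finally start the Rx/Tx state machines
 * through OMR.
 */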
static int netsec_start_gmac(struct netsec_priv *priv)
	struct phy_device *phydev = priv->ndev->phydev;

	if (phydev->speed != SPEED_1000)
		value = (NETSEC_GMAC_MCR_REG_CST |
			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_RESET))

	/* Wait soft reset */
	usleep_range(1000, 5000);

	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
	if (value & NETSEC_GMAC_BMR_REG_SWR)

	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))

	netsec_write(priv, MAC_REG_DESC_INIT, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))

	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_COMMON))
	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
			     NETSEC_GMAC_RDLAR_REG_COMMON))
	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
			     NETSEC_GMAC_TDLAR_REG_COMMON))
	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))

	ret = netsec_mac_update_to_phy_state(priv);

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);

	value |= NETSEC_GMAC_OMR_REG_SR;
	value |= NETSEC_GMAC_OMR_REG_ST;

	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);

	if (netsec_mac_write(priv, GMAC_REG_OMR, value))

static int netsec_stop_gmac(struct netsec_priv *priv)
	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	value &= ~NETSEC_GMAC_OMR_REG_SR;
	value &= ~NETSEC_GMAC_OMR_REG_ST;

	/* disable all interrupts */
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	return netsec_mac_write(priv, GMAC_REG_OMR, value);

static void netsec_phy_adjust_link(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	if (ndev->phydev->link)
		netsec_start_gmac(priv);
		netsec_stop_gmac(priv);

	phy_print_status(ndev->phydev);
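/* Top-level interrupt handler: acknowledge the per-queue Tx/Rx status,
 * mask further Tx/Rx interrupts via INTEN_CLR and defer the real work to
 * NAPI; netsec_napi_poll() re-enables them when it is done.
 */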
static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
	struct netsec_priv *priv = dev_id;
	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
	unsigned long flags;

	/* Disable interrupts */
	if (status & NETSEC_IRQ_TX) {
		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
	if (status & NETSEC_IRQ_RX) {
		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);

	spin_lock_irqsave(&priv->reglock, flags);
	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
	spin_unlock_irqrestore(&priv->reglock, flags);

	napi_schedule(&priv->napi);

static int netsec_netdev_open(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	pm_runtime_get_sync(priv->dev);

	netsec_setup_tx_dring(priv);
	ret = netsec_setup_rx_dring(priv);
		netif_err(priv, probe, priv->ndev,
			  "%s: fail setup ring\n", __func__);

	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
			  IRQF_SHARED, "netsec", priv);
		netif_err(priv, drv, priv->ndev, "request_irq failed\n");

	if (dev_of_node(priv->dev)) {
		if (!of_phy_connect(priv->ndev, priv->phy_np,
				    netsec_phy_adjust_link, 0,
				    priv->phy_interface)) {
			netif_err(priv, link, priv->ndev, "missing PHY\n");
		ret = phy_connect_direct(priv->ndev, priv->phydev,
					 netsec_phy_adjust_link,
					 priv->phy_interface);
			netif_err(priv, link, priv->ndev,
				  "phy_connect_direct() failed (%d)\n", ret);

	phy_start(ndev->phydev);

	netsec_start_gmac(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	/* Enable TX+RX intr. */
	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);

	free_irq(priv->ndev->irq, priv);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
	pm_runtime_put_sync(priv->dev);

static int netsec_netdev_stop(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	netif_stop_queue(priv->ndev);

	napi_disable(&priv->napi);

	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
	netsec_stop_gmac(priv);

	free_irq(priv->ndev->irq, priv);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);

	ret = netsec_reset_hardware(priv, false);

	pm_runtime_put_sync(priv->dev);

static int netsec_netdev_init(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);

	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);

	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);

	/* set phy power down */
	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);

	ret = netsec_reset_hardware(priv, true);

	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);

static void netsec_netdev_uninit(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);
static int netsec_netdev_set_features(struct net_device *ndev,
				      netdev_features_t features)
	struct netsec_priv *priv = netdev_priv(ndev);

	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);

static int netsec_xdp_xmit(struct net_device *ndev, int n,
			   struct xdp_frame **frames, u32 flags)
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))

	spin_lock(&tx_ring->lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = netsec_xdp_queue_one(priv, xdpf, true);
		if (err != NETSEC_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			tx_ring->xdp_xmit++;
	spin_unlock(&tx_ring->lock);

	if (unlikely(flags & XDP_XMIT_FLUSH)) {
		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
		tx_ring->xdp_xmit = 0;

static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
			    struct netlink_ext_ack *extack)
	struct net_device *dev = priv->ndev;
	struct bpf_prog *old_prog;

	/* For now just support only the usual MTU sized frames */
	if (prog && dev->mtu > 1500) {
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");

	if (netif_running(dev))
		netsec_netdev_stop(dev);

	/* Detach old prog, if any */
	old_prog = xchg(&priv->xdp_prog, prog);
		bpf_prog_put(old_prog);

	if (netif_running(dev))
		netsec_netdev_open(dev);

static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
	struct netsec_priv *priv = netdev_priv(ndev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return netsec_xdp_setup(priv, xdp->prog, xdp->extack);

static const struct net_device_ops netsec_netdev_ops = {
	.ndo_init		= netsec_netdev_init,
	.ndo_uninit		= netsec_netdev_uninit,
	.ndo_open		= netsec_netdev_open,
	.ndo_stop		= netsec_netdev_stop,
	.ndo_start_xmit		= netsec_netdev_start_xmit,
	.ndo_set_features	= netsec_netdev_set_features,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= phy_do_ioctl,
	.ndo_xdp_xmit		= netsec_xdp_xmit,
	.ndo_bpf		= netsec_xdp,
};
static int netsec_of_probe(struct platform_device *pdev,
			   struct netsec_priv *priv, u32 *phy_addr)
	err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");

	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_np) {
		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");

	*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);

	priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "phy_ref_clk not found\n");
		return PTR_ERR(priv->clk);
	priv->freq = clk_get_rate(priv->clk);

static int netsec_acpi_probe(struct platform_device *pdev,
			     struct netsec_priv *priv, u32 *phy_addr)
	if (!IS_ENABLED(CONFIG_ACPI))

	/* ACPI systems are assumed to configure the PHY in firmware, so
	 * there is really no need to discover the PHY mode from the DSDT.
	 * Since firmware is known to exist in the field that configures the
	 * PHY correctly but passes the wrong mode string in the phy-mode
	 * device property, we have no choice but to ignore it.
	 */
	priv->phy_interface = PHY_INTERFACE_MODE_NA;

	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
			"missing required property 'phy-channel'\n");

	ret = device_property_read_u32(&pdev->dev,
				       "socionext,phy-clock-frequency",
			"missing required property 'socionext,phy-clock-frequency'\n");

static void netsec_unregister_mdio(struct netsec_priv *priv)
	struct phy_device *phydev = priv->phydev;

	if (!dev_of_node(priv->dev) && phydev) {
		phy_device_remove(phydev);
		phy_device_free(phydev);

	mdiobus_unregister(priv->mii_bus);

static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
	bus->name = "SNI NETSEC MDIO";
	bus->read = netsec_phy_read;
	bus->write = netsec_phy_write;
	bus->parent = priv->dev;
	priv->mii_bus = bus;

	if (dev_of_node(priv->dev)) {
		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);

		mdio_node = of_get_child_by_name(parent, "mdio");
			/* older f/w doesn't populate the mdio subnode,
			 * allow relaxed upgrade of f/w in due time.
			 */
			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");

		ret = of_mdiobus_register(bus, parent);
		of_node_put(mdio_node);
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);

		/* Mask out all PHYs from auto probing. */
		ret = mdiobus_register(bus);
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);

		priv->phydev = get_phy_device(bus, phy_addr, false);
		if (IS_ERR(priv->phydev)) {
			ret = PTR_ERR(priv->phydev);
			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
			priv->phydev = NULL;

		ret = phy_device_register(priv->phydev);
			mdiobus_unregister(bus);
				"phy_device_register err(%d)\n", ret);
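/* Probe: map the control registers and the MAC/microcode EEPROM, pick up
 * the MAC address from device properties or the EEPROM (stored
 * byte-swapped), then register the MDIO bus, NAPI and the net_device.
 */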
static int netsec_probe(struct platform_device *pdev)
	struct resource *mmio_res, *eeprom_res, *irq_res;
	u8 *mac, macbuf[ETH_ALEN];
	struct netsec_priv *priv;
	u32 hw_ver, phy_addr = 0;
	struct net_device *ndev;

	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		dev_err(&pdev->dev, "No MMIO resource found.\n");

	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		dev_info(&pdev->dev, "No EEPROM resource found.\n");

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		dev_err(&pdev->dev, "No IRQ resource found.\n");

	ndev = alloc_etherdev(sizeof(*priv));

	priv = netdev_priv(ndev);

	spin_lock_init(&priv->reglock);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, priv);
	ndev->irq = irq_res->start;
	priv->dev = &pdev->dev;

	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   NETIF_MSG_LINK | NETIF_MSG_PROBE;

	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    resource_size(mmio_res));
	if (!priv->ioaddr) {
		dev_err(&pdev->dev, "devm_ioremap() failed\n");

	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
					 resource_size(eeprom_res));
	if (!priv->eeprom_base) {
		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");

	mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
		ether_addr_copy(ndev->dev_addr, mac);

	if (priv->eeprom_base &&
	    (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
		void __iomem *macp = priv->eeprom_base +
					NETSEC_EEPROM_MAC_ADDRESS;

		ndev->dev_addr[0] = readb(macp + 3);
		ndev->dev_addr[1] = readb(macp + 2);
		ndev->dev_addr[2] = readb(macp + 1);
		ndev->dev_addr[3] = readb(macp + 0);
		ndev->dev_addr[4] = readb(macp + 7);
		ndev->dev_addr[5] = readb(macp + 6);

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);

	if (dev_of_node(&pdev->dev))
		ret = netsec_of_probe(pdev, priv, &phy_addr);
		ret = netsec_acpi_probe(pdev, priv, &phy_addr);

	priv->phy_addr = phy_addr;

		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");

	/* default for throughput */
	priv->et_coalesce.rx_coalesce_usecs = 500;
	priv->et_coalesce.rx_max_coalesced_frames = 8;
	priv->et_coalesce.tx_coalesce_usecs = 500;
	priv->et_coalesce.tx_max_coalesced_frames = 8;

	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
		ndev->max_mtu = ETH_DATA_LEN;

	/* runtime_pm coverage just for probe, open/close also cover it */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
	/* this driver only supports F_TAIKI style NETSEC */
	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {

	dev_info(&pdev->dev, "hardware revision %d.%d\n",
		 hw_ver >> 16, hw_ver & 0xffff);

	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);

	ndev->netdev_ops = &netsec_netdev_ops;
	ndev->ethtool_ops = &netsec_ethtool_ops;

	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
			  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features = ndev->features;

	priv->rx_cksum_offload_flag = true;

	ret = netsec_register_mdio(priv, phy_addr);

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "Failed to set DMA mask\n");

	ret = register_netdev(ndev);
		netif_err(priv, probe, ndev, "register_netdev() failed\n");

	pm_runtime_put_sync(&pdev->dev);

	netsec_unregister_mdio(priv);
	netif_napi_del(&priv->napi);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	dev_err(&pdev->dev, "init failed\n");

static int netsec_remove(struct platform_device *pdev)
	struct netsec_priv *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);

	netsec_unregister_mdio(priv);

	netif_napi_del(&priv->napi);

	pm_runtime_disable(&pdev->dev);
	free_netdev(priv->ndev);

static int netsec_runtime_suspend(struct device *dev)
	struct netsec_priv *priv = dev_get_drvdata(dev);

	netsec_write(priv, NETSEC_REG_CLK_EN, 0);

	clk_disable_unprepare(priv->clk);

static int netsec_runtime_resume(struct device *dev)
	struct netsec_priv *priv = dev_get_drvdata(dev);

	clk_prepare_enable(priv->clk);

	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
					      NETSEC_CLK_EN_REG_DOM_C |
					      NETSEC_CLK_EN_REG_DOM_G);

static const struct dev_pm_ops netsec_pm_ops = {
	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
};

static const struct of_device_id netsec_dt_ids[] = {
	{ .compatible = "socionext,synquacer-netsec" },
};
MODULE_DEVICE_TABLE(of, netsec_dt_ids);

static const struct acpi_device_id netsec_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);

static struct platform_driver netsec_driver = {
	.probe	= netsec_probe,
	.remove	= netsec_remove,
		.pm = &netsec_pm_ops,
		.of_match_table = netsec_dt_ids,
		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
};
module_platform_driver(netsec_driver);

MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("NETSEC Ethernet driver");
MODULE_LICENSE("GPL");