1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Copyright (c) 2014 Linaro Ltd.
3 * Copyright (c) 2014 Hisilicon Limited.
6 #include <linux/module.h>
7 #include <linux/interrupt.h>
8 #include <linux/etherdevice.h>
9 #include <linux/platform_device.h>
10 #include <linux/property.h>
12 #include <linux/of_net.h>
13 #include <linux/of_mdio.h>
14 #include <linux/reset.h>
15 #include <linux/clk.h>
16 #include <linux/circ_buf.h>
18 #define STATION_ADDR_LOW 0x0000
19 #define STATION_ADDR_HIGH 0x0004
20 #define MAC_DUPLEX_HALF_CTRL 0x0008
21 #define MAX_FRM_SIZE 0x003c
22 #define PORT_MODE 0x0040
23 #define PORT_EN 0x0044
24 #define BITS_TX_EN BIT(2)
25 #define BITS_RX_EN BIT(1)
26 #define REC_FILT_CONTROL 0x0064
27 #define BIT_CRC_ERR_PASS BIT(5)
28 #define BIT_PAUSE_FRM_PASS BIT(4)
29 #define BIT_VLAN_DROP_EN BIT(3)
30 #define BIT_BC_DROP_EN BIT(2)
31 #define BIT_MC_MATCH_EN BIT(1)
32 #define BIT_UC_MATCH_EN BIT(0)
33 #define PORT_MC_ADDR_LOW 0x0068
34 #define PORT_MC_ADDR_HIGH 0x006C
35 #define CF_CRC_STRIP 0x01b0
36 #define MODE_CHANGE_EN 0x01b4
37 #define BIT_MODE_CHANGE_EN BIT(0)
38 #define COL_SLOT_TIME 0x01c0
39 #define RECV_CONTROL 0x01e0
40 #define BIT_STRIP_PAD_EN BIT(3)
41 #define BIT_RUNT_PKT_EN BIT(4)
42 #define CONTROL_WORD 0x0214
43 #define MDIO_SINGLE_CMD 0x03c0
44 #define MDIO_SINGLE_DATA 0x03c4
45 #define MDIO_CTRL 0x03cc
46 #define MDIO_RDATA_STATUS 0x03d0
48 #define MDIO_START BIT(20)
49 #define MDIO_R_VALID BIT(0)
50 #define MDIO_READ (BIT(17) | MDIO_START)
51 #define MDIO_WRITE (BIT(16) | MDIO_START)
53 #define RX_FQ_START_ADDR 0x0500
54 #define RX_FQ_DEPTH 0x0504
55 #define RX_FQ_WR_ADDR 0x0508
56 #define RX_FQ_RD_ADDR 0x050c
57 #define RX_FQ_VLDDESC_CNT 0x0510
58 #define RX_FQ_ALEMPTY_TH 0x0514
59 #define RX_FQ_REG_EN 0x0518
60 #define BITS_RX_FQ_START_ADDR_EN BIT(2)
61 #define BITS_RX_FQ_DEPTH_EN BIT(1)
62 #define BITS_RX_FQ_RD_ADDR_EN BIT(0)
63 #define RX_FQ_ALFULL_TH 0x051c
64 #define RX_BQ_START_ADDR 0x0520
65 #define RX_BQ_DEPTH 0x0524
66 #define RX_BQ_WR_ADDR 0x0528
67 #define RX_BQ_RD_ADDR 0x052c
68 #define RX_BQ_FREE_DESC_CNT 0x0530
69 #define RX_BQ_ALEMPTY_TH 0x0534
70 #define RX_BQ_REG_EN 0x0538
71 #define BITS_RX_BQ_START_ADDR_EN BIT(2)
72 #define BITS_RX_BQ_DEPTH_EN BIT(1)
73 #define BITS_RX_BQ_WR_ADDR_EN BIT(0)
74 #define RX_BQ_ALFULL_TH 0x053c
75 #define TX_BQ_START_ADDR 0x0580
76 #define TX_BQ_DEPTH 0x0584
77 #define TX_BQ_WR_ADDR 0x0588
78 #define TX_BQ_RD_ADDR 0x058c
79 #define TX_BQ_VLDDESC_CNT 0x0590
80 #define TX_BQ_ALEMPTY_TH 0x0594
81 #define TX_BQ_REG_EN 0x0598
82 #define BITS_TX_BQ_START_ADDR_EN BIT(2)
83 #define BITS_TX_BQ_DEPTH_EN BIT(1)
84 #define BITS_TX_BQ_RD_ADDR_EN BIT(0)
85 #define TX_BQ_ALFULL_TH 0x059c
86 #define TX_RQ_START_ADDR 0x05a0
87 #define TX_RQ_DEPTH 0x05a4
88 #define TX_RQ_WR_ADDR 0x05a8
89 #define TX_RQ_RD_ADDR 0x05ac
90 #define TX_RQ_FREE_DESC_CNT 0x05b0
91 #define TX_RQ_ALEMPTY_TH 0x05b4
92 #define TX_RQ_REG_EN 0x05b8
93 #define BITS_TX_RQ_START_ADDR_EN BIT(2)
94 #define BITS_TX_RQ_DEPTH_EN BIT(1)
95 #define BITS_TX_RQ_WR_ADDR_EN BIT(0)
96 #define TX_RQ_ALFULL_TH 0x05bc
97 #define RAW_PMU_INT 0x05c0
98 #define ENA_PMU_INT 0x05c4
99 #define STATUS_PMU_INT 0x05c8
100 #define MAC_FIFO_ERR_IN BIT(30)
101 #define TX_RQ_IN_TIMEOUT_INT BIT(29)
102 #define RX_BQ_IN_TIMEOUT_INT BIT(28)
103 #define TXOUTCFF_FULL_INT BIT(27)
104 #define TXOUTCFF_EMPTY_INT BIT(26)
105 #define TXCFF_FULL_INT BIT(25)
106 #define TXCFF_EMPTY_INT BIT(24)
107 #define RXOUTCFF_FULL_INT BIT(23)
108 #define RXOUTCFF_EMPTY_INT BIT(22)
109 #define RXCFF_FULL_INT BIT(21)
110 #define RXCFF_EMPTY_INT BIT(20)
111 #define TX_RQ_IN_INT BIT(19)
112 #define TX_BQ_OUT_INT BIT(18)
113 #define RX_BQ_IN_INT BIT(17)
114 #define RX_FQ_OUT_INT BIT(16)
115 #define TX_RQ_EMPTY_INT BIT(15)
116 #define TX_RQ_FULL_INT BIT(14)
117 #define TX_RQ_ALEMPTY_INT BIT(13)
118 #define TX_RQ_ALFULL_INT BIT(12)
119 #define TX_BQ_EMPTY_INT BIT(11)
120 #define TX_BQ_FULL_INT BIT(10)
121 #define TX_BQ_ALEMPTY_INT BIT(9)
122 #define TX_BQ_ALFULL_INT BIT(8)
123 #define RX_BQ_EMPTY_INT BIT(7)
124 #define RX_BQ_FULL_INT BIT(6)
125 #define RX_BQ_ALEMPTY_INT BIT(5)
126 #define RX_BQ_ALFULL_INT BIT(4)
127 #define RX_FQ_EMPTY_INT BIT(3)
128 #define RX_FQ_FULL_INT BIT(2)
129 #define RX_FQ_ALEMPTY_INT BIT(1)
130 #define RX_FQ_ALFULL_INT BIT(0)
132 #define DEF_INT_MASK (RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \
133 TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT)
135 #define DESC_WR_RD_ENA 0x05cc
136 #define IN_QUEUE_TH 0x05d8
137 #define OUT_QUEUE_TH 0x05dc
138 #define QUEUE_TX_BQ_SHIFT 16
139 #define RX_BQ_IN_TIMEOUT_TH 0x05e0
140 #define TX_RQ_IN_TIMEOUT_TH 0x05e4
141 #define STOP_CMD 0x05e8
142 #define BITS_TX_STOP BIT(1)
143 #define BITS_RX_STOP BIT(0)
144 #define FLUSH_CMD 0x05eC
145 #define BITS_TX_FLUSH_CMD BIT(5)
146 #define BITS_RX_FLUSH_CMD BIT(4)
147 #define BITS_TX_FLUSH_FLAG_DOWN BIT(3)
148 #define BITS_TX_FLUSH_FLAG_UP BIT(2)
149 #define BITS_RX_FLUSH_FLAG_DOWN BIT(1)
150 #define BITS_RX_FLUSH_FLAG_UP BIT(0)
151 #define RX_CFF_NUM_REG 0x05f0
152 #define PMU_FSM_REG 0x05f8
153 #define RX_FIFO_PKT_IN_NUM 0x05fc
154 #define RX_FIFO_PKT_OUT_NUM 0x0600
156 #define RGMII_SPEED_1000 0x2c
157 #define RGMII_SPEED_100 0x2f
158 #define RGMII_SPEED_10 0x2d
159 #define MII_SPEED_100 0x0f
160 #define MII_SPEED_10 0x0d
161 #define GMAC_SPEED_1000 0x05
162 #define GMAC_SPEED_100 0x01
163 #define GMAC_SPEED_10 0x00
164 #define GMAC_FULL_DUPLEX BIT(4)
166 #define RX_BQ_INT_THRESHOLD 0x01
167 #define TX_RQ_INT_THRESHOLD 0x01
168 #define RX_BQ_IN_TIMEOUT 0x10000
169 #define TX_RQ_IN_TIMEOUT 0x50000
171 #define MAC_MAX_FRAME_SIZE 1600
173 #define RX_DESC_NUM 1024
174 #define TX_DESC_NUM 1024
176 #define DESC_VLD_FREE 0
177 #define DESC_VLD_BUSY 0x80000000
178 #define DESC_FL_MID 0
179 #define DESC_FL_LAST 0x20000000
180 #define DESC_FL_FIRST 0x40000000
181 #define DESC_FL_FULL 0x60000000
182 #define DESC_DATA_LEN_OFF 16
183 #define DESC_BUFF_LEN_OFF 0
184 #define DESC_DATA_MASK 0x7ff
185 #define DESC_SG BIT(30)
186 #define DESC_FRAGS_NUM_OFF 11
188 /* DMA descriptor ring helpers */
189 #define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
190 #define dma_cnt(n) ((n) >> 5)
191 #define dma_byte(n) ((n) << 5)
/* Hardware capability flags; the GEMAC_V1 base definition was lost in the
 * extraction — 0 (no extra capabilities) is the only value consistent with
 * GEMAC_V2 = (GEMAC_V1 | HW_CAP_TSO). TODO confirm against upstream.
 */
#define HW_CAP_TSO		BIT(0)
#define GEMAC_V1		0
#define GEMAC_V2		(GEMAC_V1 | HW_CAP_TSO)
#define HAS_CAP_TSO(hw_cap)	((hw_cap) & HW_CAP_TSO)
198 #define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"
200 enum phy_reset_delays
{
207 struct hix5hd2_desc
{
212 struct hix5hd2_desc_sw
{
213 struct hix5hd2_desc
*desc
;
214 dma_addr_t phys_addr
;
219 struct hix5hd2_sg_desc_ring
{
220 struct sg_desc
*desc
;
221 dma_addr_t phys_addr
;
229 /* hardware supported max skb frags num */
230 #define SG_MAX_SKB_FRAGS 17
236 /* reserve one more frags for memory alignment */
237 struct frags_info frags
[SG_MAX_SKB_FRAGS
+ 1];
241 struct hix5hd2_priv
{
242 struct hix5hd2_desc_sw pool
[QUEUE_NUMS
];
243 #define rx_fq pool[0]
244 #define rx_bq pool[1]
245 #define tx_bq pool[2]
246 #define tx_rq pool[3]
247 struct hix5hd2_sg_desc_ring tx_ring
;
250 void __iomem
*ctrl_base
;
252 struct sk_buff
*tx_skb
[TX_DESC_NUM
];
253 struct sk_buff
*rx_skb
[RX_DESC_NUM
];
256 struct net_device
*netdev
;
258 struct device_node
*phy_node
;
259 phy_interface_t phy_mode
;
261 unsigned long hw_cap
;
265 struct clk
*mac_core_clk
;
266 struct clk
*mac_ifc_clk
;
267 struct reset_control
*mac_core_rst
;
268 struct reset_control
*mac_ifc_rst
;
269 struct reset_control
*phy_rst
;
270 u32 phy_reset_delays
[DELAYS_NUM
];
272 struct napi_struct napi
;
273 struct work_struct tx_timeout_task
;
276 static inline void hix5hd2_mac_interface_reset(struct hix5hd2_priv
*priv
)
278 if (!priv
->mac_ifc_rst
)
281 reset_control_assert(priv
->mac_ifc_rst
);
282 reset_control_deassert(priv
->mac_ifc_rst
);
285 static void hix5hd2_config_port(struct net_device
*dev
, u32 speed
, u32 duplex
)
287 struct hix5hd2_priv
*priv
= netdev_priv(dev
);
291 priv
->duplex
= duplex
;
293 switch (priv
->phy_mode
) {
294 case PHY_INTERFACE_MODE_RGMII
:
295 if (speed
== SPEED_1000
)
296 val
= RGMII_SPEED_1000
;
297 else if (speed
== SPEED_100
)
298 val
= RGMII_SPEED_100
;
300 val
= RGMII_SPEED_10
;
302 case PHY_INTERFACE_MODE_MII
:
303 if (speed
== SPEED_100
)
309 netdev_warn(dev
, "not supported mode\n");
315 val
|= GMAC_FULL_DUPLEX
;
316 writel_relaxed(val
, priv
->ctrl_base
);
317 hix5hd2_mac_interface_reset(priv
);
319 writel_relaxed(BIT_MODE_CHANGE_EN
, priv
->base
+ MODE_CHANGE_EN
);
320 if (speed
== SPEED_1000
)
321 val
= GMAC_SPEED_1000
;
322 else if (speed
== SPEED_100
)
323 val
= GMAC_SPEED_100
;
326 writel_relaxed(val
, priv
->base
+ PORT_MODE
);
327 writel_relaxed(0, priv
->base
+ MODE_CHANGE_EN
);
328 writel_relaxed(duplex
, priv
->base
+ MAC_DUPLEX_HALF_CTRL
);
331 static void hix5hd2_set_desc_depth(struct hix5hd2_priv
*priv
, int rx
, int tx
)
333 writel_relaxed(BITS_RX_FQ_DEPTH_EN
, priv
->base
+ RX_FQ_REG_EN
);
334 writel_relaxed(rx
<< 3, priv
->base
+ RX_FQ_DEPTH
);
335 writel_relaxed(0, priv
->base
+ RX_FQ_REG_EN
);
337 writel_relaxed(BITS_RX_BQ_DEPTH_EN
, priv
->base
+ RX_BQ_REG_EN
);
338 writel_relaxed(rx
<< 3, priv
->base
+ RX_BQ_DEPTH
);
339 writel_relaxed(0, priv
->base
+ RX_BQ_REG_EN
);
341 writel_relaxed(BITS_TX_BQ_DEPTH_EN
, priv
->base
+ TX_BQ_REG_EN
);
342 writel_relaxed(tx
<< 3, priv
->base
+ TX_BQ_DEPTH
);
343 writel_relaxed(0, priv
->base
+ TX_BQ_REG_EN
);
345 writel_relaxed(BITS_TX_RQ_DEPTH_EN
, priv
->base
+ TX_RQ_REG_EN
);
346 writel_relaxed(tx
<< 3, priv
->base
+ TX_RQ_DEPTH
);
347 writel_relaxed(0, priv
->base
+ TX_RQ_REG_EN
);
350 static void hix5hd2_set_rx_fq(struct hix5hd2_priv
*priv
, dma_addr_t phy_addr
)
352 writel_relaxed(BITS_RX_FQ_START_ADDR_EN
, priv
->base
+ RX_FQ_REG_EN
);
353 writel_relaxed(phy_addr
, priv
->base
+ RX_FQ_START_ADDR
);
354 writel_relaxed(0, priv
->base
+ RX_FQ_REG_EN
);
357 static void hix5hd2_set_rx_bq(struct hix5hd2_priv
*priv
, dma_addr_t phy_addr
)
359 writel_relaxed(BITS_RX_BQ_START_ADDR_EN
, priv
->base
+ RX_BQ_REG_EN
);
360 writel_relaxed(phy_addr
, priv
->base
+ RX_BQ_START_ADDR
);
361 writel_relaxed(0, priv
->base
+ RX_BQ_REG_EN
);
364 static void hix5hd2_set_tx_bq(struct hix5hd2_priv
*priv
, dma_addr_t phy_addr
)
366 writel_relaxed(BITS_TX_BQ_START_ADDR_EN
, priv
->base
+ TX_BQ_REG_EN
);
367 writel_relaxed(phy_addr
, priv
->base
+ TX_BQ_START_ADDR
);
368 writel_relaxed(0, priv
->base
+ TX_BQ_REG_EN
);
371 static void hix5hd2_set_tx_rq(struct hix5hd2_priv
*priv
, dma_addr_t phy_addr
)
373 writel_relaxed(BITS_TX_RQ_START_ADDR_EN
, priv
->base
+ TX_RQ_REG_EN
);
374 writel_relaxed(phy_addr
, priv
->base
+ TX_RQ_START_ADDR
);
375 writel_relaxed(0, priv
->base
+ TX_RQ_REG_EN
);
378 static void hix5hd2_set_desc_addr(struct hix5hd2_priv
*priv
)
380 hix5hd2_set_rx_fq(priv
, priv
->rx_fq
.phys_addr
);
381 hix5hd2_set_rx_bq(priv
, priv
->rx_bq
.phys_addr
);
382 hix5hd2_set_tx_rq(priv
, priv
->tx_rq
.phys_addr
);
383 hix5hd2_set_tx_bq(priv
, priv
->tx_bq
.phys_addr
);
386 static void hix5hd2_hw_init(struct hix5hd2_priv
*priv
)
390 /* disable and clear all interrupts */
391 writel_relaxed(0, priv
->base
+ ENA_PMU_INT
);
392 writel_relaxed(~0, priv
->base
+ RAW_PMU_INT
);
394 writel_relaxed(BIT_CRC_ERR_PASS
, priv
->base
+ REC_FILT_CONTROL
);
395 writel_relaxed(MAC_MAX_FRAME_SIZE
, priv
->base
+ CONTROL_WORD
);
396 writel_relaxed(0, priv
->base
+ COL_SLOT_TIME
);
398 val
= RX_BQ_INT_THRESHOLD
| TX_RQ_INT_THRESHOLD
<< QUEUE_TX_BQ_SHIFT
;
399 writel_relaxed(val
, priv
->base
+ IN_QUEUE_TH
);
401 writel_relaxed(RX_BQ_IN_TIMEOUT
, priv
->base
+ RX_BQ_IN_TIMEOUT_TH
);
402 writel_relaxed(TX_RQ_IN_TIMEOUT
, priv
->base
+ TX_RQ_IN_TIMEOUT_TH
);
404 hix5hd2_set_desc_depth(priv
, RX_DESC_NUM
, TX_DESC_NUM
);
405 hix5hd2_set_desc_addr(priv
);
408 static void hix5hd2_irq_enable(struct hix5hd2_priv
*priv
)
410 writel_relaxed(DEF_INT_MASK
, priv
->base
+ ENA_PMU_INT
);
413 static void hix5hd2_irq_disable(struct hix5hd2_priv
*priv
)
415 writel_relaxed(0, priv
->base
+ ENA_PMU_INT
);
418 static void hix5hd2_port_enable(struct hix5hd2_priv
*priv
)
420 writel_relaxed(0xf, priv
->base
+ DESC_WR_RD_ENA
);
421 writel_relaxed(BITS_RX_EN
| BITS_TX_EN
, priv
->base
+ PORT_EN
);
424 static void hix5hd2_port_disable(struct hix5hd2_priv
*priv
)
426 writel_relaxed(~(u32
)(BITS_RX_EN
| BITS_TX_EN
), priv
->base
+ PORT_EN
);
427 writel_relaxed(0, priv
->base
+ DESC_WR_RD_ENA
);
430 static void hix5hd2_hw_set_mac_addr(struct net_device
*dev
)
432 struct hix5hd2_priv
*priv
= netdev_priv(dev
);
433 const unsigned char *mac
= dev
->dev_addr
;
436 val
= mac
[1] | (mac
[0] << 8);
437 writel_relaxed(val
, priv
->base
+ STATION_ADDR_HIGH
);
439 val
= mac
[5] | (mac
[4] << 8) | (mac
[3] << 16) | (mac
[2] << 24);
440 writel_relaxed(val
, priv
->base
+ STATION_ADDR_LOW
);
/* ndo_set_mac_address: validate/store the new address, then push it
 * to hardware only if eth_mac_addr() accepted it.
 */
static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (!ret)
		hix5hd2_hw_set_mac_addr(dev);

	return ret;
}
454 static void hix5hd2_adjust_link(struct net_device
*dev
)
456 struct hix5hd2_priv
*priv
= netdev_priv(dev
);
457 struct phy_device
*phy
= dev
->phydev
;
459 if ((priv
->speed
!= phy
->speed
) || (priv
->duplex
!= phy
->duplex
)) {
460 hix5hd2_config_port(dev
, phy
->speed
, phy
->duplex
);
461 phy_print_status(phy
);
465 static void hix5hd2_rx_refill(struct hix5hd2_priv
*priv
)
467 struct hix5hd2_desc
*desc
;
469 u32 start
, end
, num
, pos
, i
;
470 u32 len
= MAC_MAX_FRAME_SIZE
;
473 /* software write pointer */
474 start
= dma_cnt(readl_relaxed(priv
->base
+ RX_FQ_WR_ADDR
));
475 /* logic read pointer */
476 end
= dma_cnt(readl_relaxed(priv
->base
+ RX_FQ_RD_ADDR
));
477 num
= CIRC_SPACE(start
, end
, RX_DESC_NUM
);
479 for (i
= 0, pos
= start
; i
< num
; i
++) {
480 if (priv
->rx_skb
[pos
]) {
483 skb
= netdev_alloc_skb_ip_align(priv
->netdev
, len
);
484 if (unlikely(skb
== NULL
))
488 addr
= dma_map_single(priv
->dev
, skb
->data
, len
, DMA_FROM_DEVICE
);
489 if (dma_mapping_error(priv
->dev
, addr
)) {
490 dev_kfree_skb_any(skb
);
494 desc
= priv
->rx_fq
.desc
+ pos
;
495 desc
->buff_addr
= cpu_to_le32(addr
);
496 priv
->rx_skb
[pos
] = skb
;
497 desc
->cmd
= cpu_to_le32(DESC_VLD_FREE
|
498 (len
- 1) << DESC_BUFF_LEN_OFF
);
499 pos
= dma_ring_incr(pos
, RX_DESC_NUM
);
502 /* ensure desc updated */
506 writel_relaxed(dma_byte(pos
), priv
->base
+ RX_FQ_WR_ADDR
);
509 static int hix5hd2_rx(struct net_device
*dev
, int limit
)
511 struct hix5hd2_priv
*priv
= netdev_priv(dev
);
513 struct hix5hd2_desc
*desc
;
515 u32 start
, end
, num
, pos
, i
, len
;
517 /* software read pointer */
518 start
= dma_cnt(readl_relaxed(priv
->base
+ RX_BQ_RD_ADDR
));
519 /* logic write pointer */
520 end
= dma_cnt(readl_relaxed(priv
->base
+ RX_BQ_WR_ADDR
));
521 num
= CIRC_CNT(end
, start
, RX_DESC_NUM
);
525 /* ensure get updated desc */
527 for (i
= 0, pos
= start
; i
< num
; i
++) {
528 skb
= priv
->rx_skb
[pos
];
529 if (unlikely(!skb
)) {
530 netdev_err(dev
, "inconsistent rx_skb\n");
533 priv
->rx_skb
[pos
] = NULL
;
535 desc
= priv
->rx_bq
.desc
+ pos
;
536 len
= (le32_to_cpu(desc
->cmd
) >> DESC_DATA_LEN_OFF
) &
538 addr
= le32_to_cpu(desc
->buff_addr
);
539 dma_unmap_single(priv
->dev
, addr
, MAC_MAX_FRAME_SIZE
,
543 if (skb
->len
> MAC_MAX_FRAME_SIZE
) {
544 netdev_err(dev
, "rcv len err, len = %d\n", skb
->len
);
545 dev
->stats
.rx_errors
++;
546 dev
->stats
.rx_length_errors
++;
547 dev_kfree_skb_any(skb
);
551 skb
->protocol
= eth_type_trans(skb
, dev
);
552 napi_gro_receive(&priv
->napi
, skb
);
553 dev
->stats
.rx_packets
++;
554 dev
->stats
.rx_bytes
+= len
;
556 pos
= dma_ring_incr(pos
, RX_DESC_NUM
);
560 writel_relaxed(dma_byte(pos
), priv
->base
+ RX_BQ_RD_ADDR
);
562 hix5hd2_rx_refill(priv
);
567 static void hix5hd2_clean_sg_desc(struct hix5hd2_priv
*priv
,
568 struct sk_buff
*skb
, u32 pos
)
570 struct sg_desc
*desc
;
575 desc
= priv
->tx_ring
.desc
+ pos
;
577 addr
= le32_to_cpu(desc
->linear_addr
);
578 len
= le32_to_cpu(desc
->linear_len
);
579 dma_unmap_single(priv
->dev
, addr
, len
, DMA_TO_DEVICE
);
581 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
582 addr
= le32_to_cpu(desc
->frags
[i
].addr
);
583 len
= le32_to_cpu(desc
->frags
[i
].size
);
584 dma_unmap_page(priv
->dev
, addr
, len
, DMA_TO_DEVICE
);
588 static void hix5hd2_xmit_reclaim(struct net_device
*dev
)
591 struct hix5hd2_desc
*desc
;
592 struct hix5hd2_priv
*priv
= netdev_priv(dev
);
593 unsigned int bytes_compl
= 0, pkts_compl
= 0;
594 u32 start
, end
, num
, pos
, i
;
600 start
= dma_cnt(readl_relaxed(priv
->base
+ TX_RQ_RD_ADDR
));
602 end
= dma_cnt(readl_relaxed(priv
->base
+ TX_RQ_WR_ADDR
));
603 num
= CIRC_CNT(end
, start
, TX_DESC_NUM
);
605 for (i
= 0, pos
= start
; i
< num
; i
++) {
606 skb
= priv
->tx_skb
[pos
];
607 if (unlikely(!skb
)) {
608 netdev_err(dev
, "inconsistent tx_skb\n");
613 bytes_compl
+= skb
->len
;
614 desc
= priv
->tx_rq
.desc
+ pos
;
616 if (skb_shinfo(skb
)->nr_frags
) {
617 hix5hd2_clean_sg_desc(priv
, skb
, pos
);
619 addr
= le32_to_cpu(desc
->buff_addr
);
620 dma_unmap_single(priv
->dev
, addr
, skb
->len
,
624 priv
->tx_skb
[pos
] = NULL
;
625 dev_consume_skb_any(skb
);
626 pos
= dma_ring_incr(pos
, TX_DESC_NUM
);
630 writel_relaxed(dma_byte(pos
), priv
->base
+ TX_RQ_RD_ADDR
);
632 netif_tx_unlock(dev
);
634 if (pkts_compl
|| bytes_compl
)
635 netdev_completed_queue(dev
, pkts_compl
, bytes_compl
);
637 if (unlikely(netif_queue_stopped(priv
->netdev
)) && pkts_compl
)
638 netif_wake_queue(priv
->netdev
);
641 static int hix5hd2_poll(struct napi_struct
*napi
, int budget
)
643 struct hix5hd2_priv
*priv
= container_of(napi
,
644 struct hix5hd2_priv
, napi
);
645 struct net_device
*dev
= priv
->netdev
;
646 int work_done
= 0, task
= budget
;
650 hix5hd2_xmit_reclaim(dev
);
651 num
= hix5hd2_rx(dev
, task
);
654 if ((work_done
>= budget
) || (num
== 0))
657 ints
= readl_relaxed(priv
->base
+ RAW_PMU_INT
);
658 writel_relaxed(ints
, priv
->base
+ RAW_PMU_INT
);
659 } while (ints
& DEF_INT_MASK
);
661 if (work_done
< budget
) {
662 napi_complete_done(napi
, work_done
);
663 hix5hd2_irq_enable(priv
);
669 static irqreturn_t
hix5hd2_interrupt(int irq
, void *dev_id
)
671 struct net_device
*dev
= (struct net_device
*)dev_id
;
672 struct hix5hd2_priv
*priv
= netdev_priv(dev
);
673 int ints
= readl_relaxed(priv
->base
+ RAW_PMU_INT
);
675 writel_relaxed(ints
, priv
->base
+ RAW_PMU_INT
);
676 if (likely(ints
& DEF_INT_MASK
)) {
677 hix5hd2_irq_disable(priv
);
678 napi_schedule(&priv
->napi
);
684 static u32
hix5hd2_get_desc_cmd(struct sk_buff
*skb
, unsigned long hw_cap
)
688 if (HAS_CAP_TSO(hw_cap
)) {
689 if (skb_shinfo(skb
)->nr_frags
)
691 cmd
|= skb_shinfo(skb
)->nr_frags
<< DESC_FRAGS_NUM_OFF
;
693 cmd
|= DESC_FL_FULL
|
694 ((skb
->len
& DESC_DATA_MASK
) << DESC_BUFF_LEN_OFF
);
697 cmd
|= (skb
->len
& DESC_DATA_MASK
) << DESC_DATA_LEN_OFF
;
698 cmd
|= DESC_VLD_BUSY
;
703 static int hix5hd2_fill_sg_desc(struct hix5hd2_priv
*priv
,
704 struct sk_buff
*skb
, u32 pos
)
706 struct sg_desc
*desc
;
711 desc
= priv
->tx_ring
.desc
+ pos
;
713 desc
->total_len
= cpu_to_le32(skb
->len
);
714 addr
= dma_map_single(priv
->dev
, skb
->data
, skb_headlen(skb
),
716 if (unlikely(dma_mapping_error(priv
->dev
, addr
)))
718 desc
->linear_addr
= cpu_to_le32(addr
);
719 desc
->linear_len
= cpu_to_le32(skb_headlen(skb
));
721 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
722 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
723 int len
= skb_frag_size(frag
);
725 addr
= skb_frag_dma_map(priv
->dev
, frag
, 0, len
, DMA_TO_DEVICE
);
726 ret
= dma_mapping_error(priv
->dev
, addr
);
729 desc
->frags
[i
].addr
= cpu_to_le32(addr
);
730 desc
->frags
[i
].size
= cpu_to_le32(len
);
736 static netdev_tx_t
hix5hd2_net_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
738 struct hix5hd2_priv
*priv
= netdev_priv(dev
);
739 struct hix5hd2_desc
*desc
;
745 /* software write pointer */
746 pos
= dma_cnt(readl_relaxed(priv
->base
+ TX_BQ_WR_ADDR
));
747 if (unlikely(priv
->tx_skb
[pos
])) {
748 dev
->stats
.tx_dropped
++;
749 dev
->stats
.tx_fifo_errors
++;
750 netif_stop_queue(dev
);
751 return NETDEV_TX_BUSY
;
754 desc
= priv
->tx_bq
.desc
+ pos
;
756 cmd
= hix5hd2_get_desc_cmd(skb
, priv
->hw_cap
);
757 desc
->cmd
= cpu_to_le32(cmd
);
759 if (skb_shinfo(skb
)->nr_frags
) {
760 ret
= hix5hd2_fill_sg_desc(priv
, skb
, pos
);
762 dev_kfree_skb_any(skb
);
763 dev
->stats
.tx_dropped
++;
766 addr
= priv
->tx_ring
.phys_addr
+ pos
* sizeof(struct sg_desc
);
768 addr
= dma_map_single(priv
->dev
, skb
->data
, skb
->len
,
770 if (unlikely(dma_mapping_error(priv
->dev
, addr
))) {
771 dev_kfree_skb_any(skb
);
772 dev
->stats
.tx_dropped
++;
776 desc
->buff_addr
= cpu_to_le32(addr
);
778 priv
->tx_skb
[pos
] = skb
;
780 /* ensure desc updated */
783 pos
= dma_ring_incr(pos
, TX_DESC_NUM
);
784 writel_relaxed(dma_byte(pos
), priv
->base
+ TX_BQ_WR_ADDR
);
786 netif_trans_update(dev
);
787 dev
->stats
.tx_packets
++;
788 dev
->stats
.tx_bytes
+= skb
->len
;
789 netdev_sent_queue(dev
, skb
->len
);
794 static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv
*priv
)
796 struct hix5hd2_desc
*desc
;
800 for (i
= 0; i
< RX_DESC_NUM
; i
++) {
801 struct sk_buff
*skb
= priv
->rx_skb
[i
];
805 desc
= priv
->rx_fq
.desc
+ i
;
806 addr
= le32_to_cpu(desc
->buff_addr
);
807 dma_unmap_single(priv
->dev
, addr
,
808 MAC_MAX_FRAME_SIZE
, DMA_FROM_DEVICE
);
809 dev_kfree_skb_any(skb
);
810 priv
->rx_skb
[i
] = NULL
;
813 for (i
= 0; i
< TX_DESC_NUM
; i
++) {
814 struct sk_buff
*skb
= priv
->tx_skb
[i
];
818 desc
= priv
->tx_rq
.desc
+ i
;
819 addr
= le32_to_cpu(desc
->buff_addr
);
820 dma_unmap_single(priv
->dev
, addr
, skb
->len
, DMA_TO_DEVICE
);
821 dev_kfree_skb_any(skb
);
822 priv
->tx_skb
[i
] = NULL
;
826 static int hix5hd2_net_open(struct net_device
*dev
)
828 struct hix5hd2_priv
*priv
= netdev_priv(dev
);
829 struct phy_device
*phy
;
832 ret
= clk_prepare_enable(priv
->mac_core_clk
);
834 netdev_err(dev
, "failed to enable mac core clk %d\n", ret
);
838 ret
= clk_prepare_enable(priv
->mac_ifc_clk
);
840 clk_disable_unprepare(priv
->mac_core_clk
);
841 netdev_err(dev
, "failed to enable mac ifc clk %d\n", ret
);
845 phy
= of_phy_connect(dev
, priv
->phy_node
,
846 &hix5hd2_adjust_link
, 0, priv
->phy_mode
);
848 clk_disable_unprepare(priv
->mac_ifc_clk
);
849 clk_disable_unprepare(priv
->mac_core_clk
);
854 hix5hd2_hw_init(priv
);
855 hix5hd2_rx_refill(priv
);
857 netdev_reset_queue(dev
);
858 netif_start_queue(dev
);
859 napi_enable(&priv
->napi
);
861 hix5hd2_port_enable(priv
);
862 hix5hd2_irq_enable(priv
);
867 static int hix5hd2_net_close(struct net_device
*dev
)
869 struct hix5hd2_priv
*priv
= netdev_priv(dev
);
871 hix5hd2_port_disable(priv
);
872 hix5hd2_irq_disable(priv
);
873 napi_disable(&priv
->napi
);
874 netif_stop_queue(dev
);
875 hix5hd2_free_dma_desc_rings(priv
);
878 phy_stop(dev
->phydev
);
879 phy_disconnect(dev
->phydev
);
882 clk_disable_unprepare(priv
->mac_ifc_clk
);
883 clk_disable_unprepare(priv
->mac_core_clk
);
888 static void hix5hd2_tx_timeout_task(struct work_struct
*work
)
890 struct hix5hd2_priv
*priv
;
892 priv
= container_of(work
, struct hix5hd2_priv
, tx_timeout_task
);
893 hix5hd2_net_close(priv
->netdev
);
894 hix5hd2_net_open(priv
->netdev
);
897 static void hix5hd2_net_timeout(struct net_device
*dev
, unsigned int txqueue
)
899 struct hix5hd2_priv
*priv
= netdev_priv(dev
);
901 schedule_work(&priv
->tx_timeout_task
);
904 static const struct net_device_ops hix5hd2_netdev_ops
= {
905 .ndo_open
= hix5hd2_net_open
,
906 .ndo_stop
= hix5hd2_net_close
,
907 .ndo_start_xmit
= hix5hd2_net_xmit
,
908 .ndo_tx_timeout
= hix5hd2_net_timeout
,
909 .ndo_set_mac_address
= hix5hd2_net_set_mac_address
,
912 static const struct ethtool_ops hix5hd2_ethtools_ops
= {
913 .get_link
= ethtool_op_get_link
,
914 .get_link_ksettings
= phy_ethtool_get_link_ksettings
,
915 .set_link_ksettings
= phy_ethtool_set_link_ksettings
,
918 static int hix5hd2_mdio_wait_ready(struct mii_bus
*bus
)
920 struct hix5hd2_priv
*priv
= bus
->priv
;
921 void __iomem
*base
= priv
->base
;
922 int i
, timeout
= 10000;
924 for (i
= 0; readl_relaxed(base
+ MDIO_SINGLE_CMD
) & MDIO_START
; i
++) {
927 usleep_range(10, 20);
933 static int hix5hd2_mdio_read(struct mii_bus
*bus
, int phy
, int reg
)
935 struct hix5hd2_priv
*priv
= bus
->priv
;
936 void __iomem
*base
= priv
->base
;
939 ret
= hix5hd2_mdio_wait_ready(bus
);
943 writel_relaxed(MDIO_READ
| phy
<< 8 | reg
, base
+ MDIO_SINGLE_CMD
);
944 ret
= hix5hd2_mdio_wait_ready(bus
);
948 val
= readl_relaxed(base
+ MDIO_RDATA_STATUS
);
949 if (val
& MDIO_R_VALID
) {
950 dev_err(bus
->parent
, "SMI bus read not valid\n");
955 val
= readl_relaxed(priv
->base
+ MDIO_SINGLE_DATA
);
956 ret
= (val
>> 16) & 0xFFFF;
961 static int hix5hd2_mdio_write(struct mii_bus
*bus
, int phy
, int reg
, u16 val
)
963 struct hix5hd2_priv
*priv
= bus
->priv
;
964 void __iomem
*base
= priv
->base
;
967 ret
= hix5hd2_mdio_wait_ready(bus
);
971 writel_relaxed(val
, base
+ MDIO_SINGLE_DATA
);
972 writel_relaxed(MDIO_WRITE
| phy
<< 8 | reg
, base
+ MDIO_SINGLE_CMD
);
973 ret
= hix5hd2_mdio_wait_ready(bus
);
978 static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv
*priv
)
982 for (i
= 0; i
< QUEUE_NUMS
; i
++) {
983 if (priv
->pool
[i
].desc
) {
984 dma_free_coherent(priv
->dev
, priv
->pool
[i
].size
,
986 priv
->pool
[i
].phys_addr
);
987 priv
->pool
[i
].desc
= NULL
;
992 static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv
*priv
)
994 struct device
*dev
= priv
->dev
;
995 struct hix5hd2_desc
*virt_addr
;
996 dma_addr_t phys_addr
;
999 priv
->rx_fq
.count
= RX_DESC_NUM
;
1000 priv
->rx_bq
.count
= RX_DESC_NUM
;
1001 priv
->tx_bq
.count
= TX_DESC_NUM
;
1002 priv
->tx_rq
.count
= TX_DESC_NUM
;
1004 for (i
= 0; i
< QUEUE_NUMS
; i
++) {
1005 size
= priv
->pool
[i
].count
* sizeof(struct hix5hd2_desc
);
1006 virt_addr
= dma_alloc_coherent(dev
, size
, &phys_addr
,
1008 if (virt_addr
== NULL
)
1009 goto error_free_pool
;
1011 priv
->pool
[i
].size
= size
;
1012 priv
->pool
[i
].desc
= virt_addr
;
1013 priv
->pool
[i
].phys_addr
= phys_addr
;
1018 hix5hd2_destroy_hw_desc_queue(priv
);
1023 static int hix5hd2_init_sg_desc_queue(struct hix5hd2_priv
*priv
)
1025 struct sg_desc
*desc
;
1026 dma_addr_t phys_addr
;
1028 desc
= dma_alloc_coherent(priv
->dev
,
1029 TX_DESC_NUM
* sizeof(struct sg_desc
),
1030 &phys_addr
, GFP_KERNEL
);
1034 priv
->tx_ring
.desc
= desc
;
1035 priv
->tx_ring
.phys_addr
= phys_addr
;
1040 static void hix5hd2_destroy_sg_desc_queue(struct hix5hd2_priv
*priv
)
1042 if (priv
->tx_ring
.desc
) {
1043 dma_free_coherent(priv
->dev
,
1044 TX_DESC_NUM
* sizeof(struct sg_desc
),
1045 priv
->tx_ring
.desc
, priv
->tx_ring
.phys_addr
);
1046 priv
->tx_ring
.desc
= NULL
;
1050 static inline void hix5hd2_mac_core_reset(struct hix5hd2_priv
*priv
)
1052 if (!priv
->mac_core_rst
)
1055 reset_control_assert(priv
->mac_core_rst
);
1056 reset_control_deassert(priv
->mac_core_rst
);
1059 static void hix5hd2_sleep_us(u32 time_us
)
1066 time_ms
= DIV_ROUND_UP(time_us
, 1000);
1068 usleep_range(time_us
, time_us
+ 500);
1073 static void hix5hd2_phy_reset(struct hix5hd2_priv
*priv
)
1075 /* To make sure PHY hardware reset success,
1076 * we must keep PHY in deassert state first and
1077 * then complete the hardware reset operation
1079 reset_control_deassert(priv
->phy_rst
);
1080 hix5hd2_sleep_us(priv
->phy_reset_delays
[PRE_DELAY
]);
1082 reset_control_assert(priv
->phy_rst
);
1083 /* delay some time to ensure reset ok,
1084 * this depends on PHY hardware feature
1086 hix5hd2_sleep_us(priv
->phy_reset_delays
[PULSE
]);
1087 reset_control_deassert(priv
->phy_rst
);
1088 /* delay some time to ensure later MDIO access */
1089 hix5hd2_sleep_us(priv
->phy_reset_delays
[POST_DELAY
]);
1092 static const struct of_device_id hix5hd2_of_match
[];
1094 static int hix5hd2_dev_probe(struct platform_device
*pdev
)
1096 struct device
*dev
= &pdev
->dev
;
1097 struct device_node
*node
= dev
->of_node
;
1098 struct net_device
*ndev
;
1099 struct hix5hd2_priv
*priv
;
1100 struct mii_bus
*bus
;
1103 ndev
= alloc_etherdev(sizeof(struct hix5hd2_priv
));
1107 platform_set_drvdata(pdev
, ndev
);
1109 priv
= netdev_priv(ndev
);
1111 priv
->netdev
= ndev
;
1113 priv
->hw_cap
= (unsigned long)device_get_match_data(dev
);
1115 priv
->base
= devm_platform_ioremap_resource(pdev
, 0);
1116 if (IS_ERR(priv
->base
)) {
1117 ret
= PTR_ERR(priv
->base
);
1118 goto out_free_netdev
;
1121 priv
->ctrl_base
= devm_platform_ioremap_resource(pdev
, 1);
1122 if (IS_ERR(priv
->ctrl_base
)) {
1123 ret
= PTR_ERR(priv
->ctrl_base
);
1124 goto out_free_netdev
;
1127 priv
->mac_core_clk
= devm_clk_get(&pdev
->dev
, "mac_core");
1128 if (IS_ERR(priv
->mac_core_clk
)) {
1129 netdev_err(ndev
, "failed to get mac core clk\n");
1131 goto out_free_netdev
;
1134 ret
= clk_prepare_enable(priv
->mac_core_clk
);
1136 netdev_err(ndev
, "failed to enable mac core clk %d\n", ret
);
1137 goto out_free_netdev
;
1140 priv
->mac_ifc_clk
= devm_clk_get(&pdev
->dev
, "mac_ifc");
1141 if (IS_ERR(priv
->mac_ifc_clk
))
1142 priv
->mac_ifc_clk
= NULL
;
1144 ret
= clk_prepare_enable(priv
->mac_ifc_clk
);
1146 netdev_err(ndev
, "failed to enable mac ifc clk %d\n", ret
);
1147 goto out_disable_mac_core_clk
;
1150 priv
->mac_core_rst
= devm_reset_control_get(dev
, "mac_core");
1151 if (IS_ERR(priv
->mac_core_rst
))
1152 priv
->mac_core_rst
= NULL
;
1153 hix5hd2_mac_core_reset(priv
);
1155 priv
->mac_ifc_rst
= devm_reset_control_get(dev
, "mac_ifc");
1156 if (IS_ERR(priv
->mac_ifc_rst
))
1157 priv
->mac_ifc_rst
= NULL
;
1159 priv
->phy_rst
= devm_reset_control_get(dev
, "phy");
1160 if (IS_ERR(priv
->phy_rst
)) {
1161 priv
->phy_rst
= NULL
;
1163 ret
= of_property_read_u32_array(node
,
1164 PHY_RESET_DELAYS_PROPERTY
,
1165 priv
->phy_reset_delays
,
1168 goto out_disable_clk
;
1169 hix5hd2_phy_reset(priv
);
1172 bus
= mdiobus_alloc();
1175 goto out_disable_clk
;
1179 bus
->name
= "hix5hd2_mii_bus";
1180 bus
->read
= hix5hd2_mdio_read
;
1181 bus
->write
= hix5hd2_mdio_write
;
1182 bus
->parent
= &pdev
->dev
;
1183 snprintf(bus
->id
, MII_BUS_ID_SIZE
, "%s-mii", dev_name(&pdev
->dev
));
1186 ret
= of_mdiobus_register(bus
, node
);
1190 ret
= of_get_phy_mode(node
, &priv
->phy_mode
);
1192 netdev_err(ndev
, "not find phy-mode\n");
1196 priv
->phy_node
= of_parse_phandle(node
, "phy-handle", 0);
1197 if (!priv
->phy_node
) {
1198 netdev_err(ndev
, "not find phy-handle\n");
1203 ndev
->irq
= platform_get_irq(pdev
, 0);
1204 if (ndev
->irq
< 0) {
1209 ret
= devm_request_irq(dev
, ndev
->irq
, hix5hd2_interrupt
,
1210 0, pdev
->name
, ndev
);
1212 netdev_err(ndev
, "devm_request_irq failed\n");
1216 ret
= of_get_ethdev_address(node
, ndev
);
1218 eth_hw_addr_random(ndev
);
1219 netdev_warn(ndev
, "using random MAC address %pM\n",
1223 INIT_WORK(&priv
->tx_timeout_task
, hix5hd2_tx_timeout_task
);
1224 ndev
->watchdog_timeo
= 6 * HZ
;
1225 ndev
->priv_flags
|= IFF_UNICAST_FLT
;
1226 ndev
->netdev_ops
= &hix5hd2_netdev_ops
;
1227 ndev
->ethtool_ops
= &hix5hd2_ethtools_ops
;
1228 SET_NETDEV_DEV(ndev
, dev
);
1230 if (HAS_CAP_TSO(priv
->hw_cap
))
1231 ndev
->hw_features
|= NETIF_F_SG
;
1233 ndev
->features
|= ndev
->hw_features
| NETIF_F_HIGHDMA
;
1234 ndev
->vlan_features
|= ndev
->features
;
1236 ret
= hix5hd2_init_hw_desc_queue(priv
);
1240 netif_napi_add(ndev
, &priv
->napi
, hix5hd2_poll
);
1242 if (HAS_CAP_TSO(priv
->hw_cap
)) {
1243 ret
= hix5hd2_init_sg_desc_queue(priv
);
1245 goto out_destroy_queue
;
1248 ret
= register_netdev(priv
->netdev
);
1250 netdev_err(ndev
, "register_netdev failed!");
1251 goto out_destroy_queue
;
1254 clk_disable_unprepare(priv
->mac_ifc_clk
);
1255 clk_disable_unprepare(priv
->mac_core_clk
);
1260 if (HAS_CAP_TSO(priv
->hw_cap
))
1261 hix5hd2_destroy_sg_desc_queue(priv
);
1262 netif_napi_del(&priv
->napi
);
1263 hix5hd2_destroy_hw_desc_queue(priv
);
1265 of_node_put(priv
->phy_node
);
1267 mdiobus_unregister(bus
);
1271 clk_disable_unprepare(priv
->mac_ifc_clk
);
1272 out_disable_mac_core_clk
:
1273 clk_disable_unprepare(priv
->mac_core_clk
);
1280 static void hix5hd2_dev_remove(struct platform_device
*pdev
)
1282 struct net_device
*ndev
= platform_get_drvdata(pdev
);
1283 struct hix5hd2_priv
*priv
= netdev_priv(ndev
);
1285 netif_napi_del(&priv
->napi
);
1286 unregister_netdev(ndev
);
1287 mdiobus_unregister(priv
->bus
);
1288 mdiobus_free(priv
->bus
);
1290 if (HAS_CAP_TSO(priv
->hw_cap
))
1291 hix5hd2_destroy_sg_desc_queue(priv
);
1292 hix5hd2_destroy_hw_desc_queue(priv
);
1293 of_node_put(priv
->phy_node
);
1294 cancel_work_sync(&priv
->tx_timeout_task
);
1298 static const struct of_device_id hix5hd2_of_match
[] = {
1299 { .compatible
= "hisilicon,hisi-gmac-v1", .data
= (void *)GEMAC_V1
},
1300 { .compatible
= "hisilicon,hisi-gmac-v2", .data
= (void *)GEMAC_V2
},
1301 { .compatible
= "hisilicon,hix5hd2-gmac", .data
= (void *)GEMAC_V1
},
1302 { .compatible
= "hisilicon,hi3798cv200-gmac", .data
= (void *)GEMAC_V2
},
1303 { .compatible
= "hisilicon,hi3516a-gmac", .data
= (void *)GEMAC_V2
},
1307 MODULE_DEVICE_TABLE(of
, hix5hd2_of_match
);
1309 static struct platform_driver hix5hd2_dev_driver
= {
1311 .name
= "hisi-gmac",
1312 .of_match_table
= hix5hd2_of_match
,
1314 .probe
= hix5hd2_dev_probe
,
1315 .remove
= hix5hd2_dev_remove
,
1318 module_platform_driver(hix5hd2_dev_driver
);
1320 MODULE_DESCRIPTION("HISILICON Gigabit Ethernet MAC driver");
1321 MODULE_LICENSE("GPL v2");
1322 MODULE_ALIAS("platform:hisi-gmac");