// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 */
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#define SC_PPE_RESET_DREQ		0x026C

#define PPE_CFG_RX_ADDR			0x100
#define PPE_CFG_POOL_GRP		0x300
#define PPE_CFG_RX_BUF_SIZE		0x400
#define PPE_CFG_RX_FIFO_SIZE		0x500
#define PPE_CURR_BUF_CNT		0xa200

#define GE_DUPLEX_TYPE			0x08
#define GE_MAX_FRM_SIZE_REG		0x3c
#define GE_PORT_MODE			0x40
#define GE_PORT_EN			0x44
#define GE_SHORT_RUNTS_THR_REG		0x50
#define GE_TX_LOCAL_PAGE_REG		0x5c
#define GE_TRANSMIT_CONTROL_REG		0x60
#define GE_CF_CRC_STRIP_REG		0x1b0
#define GE_MODE_CHANGE_REG		0x1b4
#define GE_RECV_CONTROL_REG		0x1e0
#define GE_STATION_MAC_ADDRESS		0x210

#define PPE_CFG_BUS_CTRL_REG		0x424
#define PPE_CFG_RX_CTRL_REG		0x428
#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_CPU_ADD_ADDR		0x6D0
#define PPE_CFG_MAX_FRAME_LEN_REG	0x500
#define PPE_CFG_RX_PKT_MODE_REG		0x504
#define PPE_CFG_QOS_VMID_GEN		0x520
#define PPE_CFG_RX_PKT_INT		0x740
#define PPE_INTEN			0x700
#define PPE_INTSTS			0x708
#define PPE_RINT			0x704
#define PPE_CFG_STS_MODE		0x880
#else
#define PPE_CFG_CPU_ADD_ADDR		0x580
#define PPE_CFG_MAX_FRAME_LEN_REG	0x408
#define PPE_CFG_RX_PKT_MODE_REG		0x438
#define PPE_CFG_QOS_VMID_GEN		0x500
#define PPE_CFG_RX_PKT_INT		0x538
#define PPE_INTEN			0x600
#define PPE_INTSTS			0x608
#define PPE_RINT			0x604
#define PPE_CFG_STS_MODE		0x700
#endif /* CONFIG_HI13X1_GMAC */
#define PPE_HIS_RX_PKT_CNT		0x804

#define RESET_DREQ_ALL			0xffffffff

#define RCV_INT				BIT(10)
#define RCV_NOBUF			BIT(8)
#define RCV_DROP			BIT(7)
#define TX_DROP				BIT(6)
#define DEF_INT_ERR			(RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK			(RCV_INT | DEF_INT_ERR)
/* TX descriptor config */
#define TX_FREE_MEM			BIT(0)
#define TX_READ_ALLOC_L3		BIT(1)
#if defined(CONFIG_HI13X1_GMAC)
#define TX_CLEAR_WB			BIT(7)
#define TX_RELEASE_TO_PPE		BIT(4)
#define TX_FINISH_CACHE_INV		BIT(6)
#define TX_POOL_SHIFT			16
#else
#define TX_CLEAR_WB			BIT(4)
#define TX_FINISH_CACHE_INV		BIT(2)
#endif
#define TX_L3_CHECKSUM			BIT(5)
#define TX_LOOP_BACK			BIT(11)
#define RX_PKT_DROP			BIT(0)
#define RX_L2_ERR			BIT(1)
#define RX_PKT_ERR			(RX_PKT_DROP | RX_L2_ERR)

#define SGMII_SPEED_1000		0x08
#define SGMII_SPEED_100			0x07
#define SGMII_SPEED_10			0x06
#define MII_SPEED_100			0x01
#define MII_SPEED_10			0x00

#define GE_DUPLEX_FULL			BIT(0)
#define GE_DUPLEX_HALF			0x00
#define GE_MODE_CHANGE_EN		BIT(0)

#define GE_TX_AUTO_NEG			BIT(5)
#define GE_TX_ADD_CRC			BIT(6)
#define GE_TX_SHORT_PAD_THROUGH		BIT(7)

#define GE_RX_STRIP_CRC			BIT(0)
#define GE_RX_STRIP_PAD			BIT(3)
#define GE_RX_PAD_EN			BIT(4)

#define GE_AUTO_NEG_CTL			BIT(0)

#define GE_RX_INT_THRESHOLD		BIT(6)
#define GE_RX_TIMEOUT			0x04

#define GE_RX_PORT_EN			BIT(1)
#define GE_TX_PORT_EN			BIT(2)

#define PPE_CFG_RX_PKT_ALIGN		BIT(18)
#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_QOS_VMID_GRP_SHIFT	4
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	7
#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(0)
#define PPE_CFG_QOS_VMID_MODE		BIT(15)
#define PPE_CFG_BUS_LOCAL_REL		(BIT(9) | BIT(15) | BIT(19) | BIT(23))

/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
#define PPE_BUF_SIZE_SHIFT		6
#define PPE_TX_BUF_HOLD			BIT(31)
#define CACHE_LINE_MASK			0x3F
#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT	8
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	11
#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(12)
#define PPE_CFG_QOS_VMID_MODE		BIT(14)
#define PPE_CFG_BUS_LOCAL_REL		BIT(14)

/* buf unit size is 1, so the shift is 0 */
#define PPE_BUF_SIZE_SHIFT		0
#define PPE_TX_BUF_HOLD			0
#endif /* CONFIG_HI13X1_GMAC */
#define PPE_CFG_RX_FIFO_FSFU		BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT		16
#define PPE_CFG_RX_START_SHIFT		0

#define PPE_CFG_BUS_BIG_ENDIEN		BIT(0)

#define RX_DESC_NUM			128
#define TX_DESC_NUM			256
#define TX_NEXT(N)			(((N) + 1) & (TX_DESC_NUM - 1))
#define RX_NEXT(N)			(((N) + 1) & (RX_DESC_NUM - 1))

#define GMAC_PPE_RX_PKT_MAX_LEN		379
#define GMAC_MAX_PKT_LEN		1516
#define GMAC_MIN_PKT_LEN		31
#define RX_BUF_SIZE			1600
#define RESET_TIMEOUT			1000
#define TX_TIMEOUT			(6 * HZ)

#define DRV_NAME			"hip04-ether"
#define DRV_VERSION			"v1.0"

#define HIP04_MAX_TX_COALESCE_USECS	200
#define HIP04_MIN_TX_COALESCE_USECS	100
#define HIP04_MAX_TX_COALESCE_FRAMES	200
#define HIP04_MIN_TX_COALESCE_FRAMES	100
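/*
 * Two register layouts are supported: the HI13X1 GMAC variant selected by
 * CONFIG_HI13X1_GMAC above, and the default hip04 layout. Only register
 * offsets and a few bit positions differ; all code paths below are shared.
 */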
/* DMA descriptor layouts; the reserved fields pad the structures out to
 * the hardware format of the respective SoC.
 */
#if defined(CONFIG_HI13X1_GMAC)
struct tx_desc {
	u32 reserved1[2];
	u32 send_addr;
	u16 send_size;
	u16 data_offset;
	u32 reserved2[7];
	u32 cfg;
	u32 wb_addr;
	u32 reserved3[3];
};
#else
struct tx_desc {
	u32 send_addr;
	u32 send_size;
	u32 next_addr;
	u32 cfg;
	u32 wb_addr;
} __aligned(64);
#endif

struct rx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[3];
	u16 pkt_len;
	u16 reserved_16;
	u32 reserved2[6];
	u32 pkt_err;
	u32 reserved3[5];
#else
	u16 reserved_16;
	u16 pkt_len;
	u32 reserve1[3];
	u32 pkt_err;
	u32 reserve2[4];
#endif
};
struct hip04_priv {
	void __iomem *base;
#if defined(CONFIG_HI13X1_GMAC)
	void __iomem *sysctrl_base;
#endif
	phy_interface_t phy_mode;
	int chan;
	unsigned int port;
	unsigned int group;
	unsigned int speed;
	unsigned int duplex;
	unsigned int reg_inten;

	struct napi_struct napi;
	struct device *dev;
	struct net_device *ndev;

	struct tx_desc *tx_desc;
	dma_addr_t tx_desc_dma;
	struct sk_buff *tx_skb[TX_DESC_NUM];
	dma_addr_t tx_phys[TX_DESC_NUM];
	unsigned int tx_head;

	int tx_coalesce_frames;
	int tx_coalesce_usecs;
	struct hrtimer tx_coalesce_timer;

	unsigned char *rx_buf[RX_DESC_NUM];
	dma_addr_t rx_phys[RX_DESC_NUM];
	unsigned int rx_head;
	unsigned int rx_buf_size;
	unsigned int rx_cnt_remaining;

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;
	struct work_struct tx_timeout_task;

	/* written only by tx cleanup */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};
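/*
 * Ring occupancy. TX_DESC_NUM is a power of two, so the unsigned
 * subtraction below is wraparound-safe: e.g. head = 2, tail = 250 gives
 * (2 - 250) % 256 = 8 descriptors still in flight.
 */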
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % TX_DESC_NUM;
}
static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	priv->speed = speed;
	priv->duplex = duplex;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_SGMII:
		if (speed == SPEED_1000)
			val = SGMII_SPEED_1000;
		else if (speed == SPEED_100)
			val = SGMII_SPEED_100;
		else
			val = SGMII_SPEED_10;
		break;
	case PHY_INTERFACE_MODE_MII:
		if (speed == SPEED_100)
			val = MII_SPEED_100;
		else
			val = MII_SPEED_10;
		break;
	default:
		netdev_warn(ndev, "not supported mode\n");
		val = MII_SPEED_10;
		break;
	}
	writel_relaxed(val, priv->base + GE_PORT_MODE);

	val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
	writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);

	val = GE_MODE_CHANGE_EN;
	writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}
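/*
 * De-assert the PPE reset request. Only the HI13X1 variant maps the
 * system controller region (sysctrl_base); elsewhere this is a no-op.
 */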
static void hip04_reset_dreq(struct hip04_priv *priv)
{
#if defined(CONFIG_HI13X1_GMAC)
	writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
#endif
}
static void hip04_reset_ppe(struct hip04_priv *priv)
{
	u32 val, tmp, timeout = 0;

	do {
		regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
		regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
		if (timeout++ > RESET_TIMEOUT)
			break;
	} while (val & 0xfff);
}
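/*
 * One-time datapath setup: RX pool group, buffer size, FIFO depth and the
 * NET_IP_ALIGN RX offset are all programmed here, before the port is
 * enabled.
 */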
static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	val = BIT(priv->group);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

#ifndef CONFIG_HI13X1_GMAC
	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
#endif
}
static void hip04_mac_enable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* enable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
	writel_relaxed(val, priv->base + GE_PORT_EN);

	/* clear rx int */
	val = RCV_INT;
	writel_relaxed(val, priv->base + PPE_RINT);

	/* config recv int */
	val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);

	/* enable interrupt */
	priv->reg_inten = DEF_INT_MASK;
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}
static void hip04_mac_disable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* disable interrupt */
	priv->reg_inten &= ~(DEF_INT_MASK);
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);

	/* disable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
	writel_relaxed(val, priv->base + GE_PORT_EN);
}
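/*
 * The doorbell write below uses writel() rather than writel_relaxed() so
 * that all prior descriptor writes are visible to the device before it is
 * told about a new TX descriptor.
 */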
static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	u32 val;

	val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
	writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
}
static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	u32 val;

	val = phys >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
}
static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}
static void hip04_update_mac_address(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
		       priv->base + GE_STATION_MAC_ADDRESS);
	writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
			(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
		       priv->base + GE_STATION_MAC_ADDRESS + 4);
}
static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	eth_mac_addr(ndev, addr);
	hip04_update_mac_address(ndev);
	return 0;
}
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned int tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	smp_rmb();
	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}
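/*
 * Arm the TX coalesce timer with slack: hrtimer_start_range_ns() lets it
 * fire anywhere between ns and 2 * ns, i.e. between half and the whole
 * tx_coalesce_usecs interval, so expiries can be batched by the kernel.
 */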
static void hip04_start_tx_timer(struct hip04_priv *priv)
{
	unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;

	/* allow timer to fire after half the time at the earliest */
	hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
			       ns, HRTIMER_MODE_REL);
}
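/*
 * The TX ring is lockless: hip04_mac_start_xmit() is the only writer of
 * tx_head and hip04_tx_reclaim() the only writer of tx_tail; the paired
 * smp_rmb()/smp_wmb() and READ_ONCE() keep the two sides coherent without
 * a spinlock.
 */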
static netdev_tx_t
hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	smp_rmb();
	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, phys)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;

	desc->send_size = (__force u32)cpu_to_be32(skb->len);
#if defined(CONFIG_HI13X1_GMAC)
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
		| TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
	desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
	desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
#else
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	desc->send_addr = (__force u32)cpu_to_be32(phys);
#endif
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = (__force u32)cpu_to_be32(phys +
				offsetof(struct tx_desc, send_addr));
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	count++;
	netdev_sent_queue(ndev, skb->len);
	priv->tx_head = TX_NEXT(tx_head);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}
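/*
 * RX and TX completion share one NAPI context: the poll routine reclaims
 * TX descriptors first, then works through the packet count reported via
 * PPE_HIS_RX_PKT_CNT (presumably read-to-clear, given the _RC status mode
 * set in hip04_config_fifo()).
 */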
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	/* clean up tx descriptors */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	priv->rx_cnt_remaining += hip04_recv_cnt(priv);
	while (priv->rx_cnt_remaining && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			goto refill;
		}

		dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu((__force __be16)desc->pkt_len);
		err = be32_to_cpu((__force __be32)desc->pkt_err);

		if (len == 0) {
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(priv->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget) {
			--priv->rx_cnt_remaining;
			goto done;
		}

		if (--priv->rx_cnt_remaining == 0)
			priv->rx_cnt_remaining += hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete_done(napi, rx);
done:
	/* start a new timer if necessary */
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}
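/*
 * The top half only acknowledges and counts error events; all RX/TX work
 * is deferred to NAPI with the RX interrupt masked until the poll routine
 * re-enables it.
 */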
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	if (!ists)
		return IRQ_NONE;

	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP)) {
			stats->rx_errors++;
			stats->rx_dropped++;
			netdev_err(ndev, "rx drop\n");
		}
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
	struct hip04_priv *priv;

	priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

	if (napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		__napi_schedule(&priv->napi);
	}

	return HRTIMER_NORESTART;
}
static void hip04_adjust_link(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct phy_device *phy = priv->phy;

	if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
		hip04_config_port(ndev, phy->speed, phy->duplex);
		phy_print_status(phy);
	}
}
static int hip04_mac_open(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->rx_head = 0;
	priv->rx_cnt_remaining = 0;
	priv->tx_head = 0;
	priv->tx_tail = 0;
	hip04_reset_ppe(priv);

	for (i = 0; i < RX_DESC_NUM; i++) {
		dma_addr_t phys;

		phys = dma_map_single(priv->dev, priv->rx_buf[i],
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, phys))
			return -EIO;

		priv->rx_phys[i] = phys;
		hip04_set_recv_desc(priv, phys);
	}

	if (priv->phy)
		phy_start(priv->phy);

	netdev_reset_queue(ndev);
	netif_start_queue(ndev);
	hip04_mac_enable(ndev);
	napi_enable(&priv->napi);

	return 0;
}
static int hip04_mac_stop(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	hip04_mac_disable(ndev);
	hip04_tx_reclaim(ndev, true);
	hip04_reset_ppe(priv);

	if (priv->phy)
		phy_stop(priv->phy);

	for (i = 0; i < RX_DESC_NUM; i++) {
		if (priv->rx_phys[i]) {
			dma_unmap_single(priv->dev, priv->rx_phys[i],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			priv->rx_phys[i] = 0;
		}
	}

	return 0;
}
static void hip04_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
static void hip04_tx_timeout_task(struct work_struct *work)
{
	struct hip04_priv *priv;

	priv = container_of(work, struct hip04_priv, tx_timeout_task);
	hip04_mac_stop(priv->ndev);
	hip04_mac_open(priv->ndev);
}
static int hip04_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;

	return 0;
}
static int hip04_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
	     ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
	    (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
	     ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
		return -EINVAL;

	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;

	return 0;
}
static void hip04_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static const struct ethtool_ops hip04_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
	.get_coalesce		= hip04_get_coalesce,
	.set_coalesce		= hip04_set_coalesce,
	.get_drvinfo		= hip04_get_drvinfo,
};
static const struct net_device_ops hip04_netdev_ops = {
	.ndo_open		= hip04_mac_open,
	.ndo_stop		= hip04_mac_stop,
	.ndo_start_xmit		= hip04_mac_start_xmit,
	.ndo_set_mac_address	= hip04_set_mac_address,
	.ndo_tx_timeout		= hip04_timeout,
	.ndo_validate_addr	= eth_validate_addr,
};
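/*
 * Each RX buffer is a page fragment sized for build_skb(): RX_BUF_SIZE of
 * packet data plus room for the struct skb_shared_info that build_skb()
 * expects at the end of the buffer.
 */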
static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->tx_desc = dma_alloc_coherent(d,
					   TX_DESC_NUM * sizeof(struct tx_desc),
					   &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_desc)
		return -ENOMEM;

	priv->rx_buf_size = RX_BUF_SIZE +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	for (i = 0; i < RX_DESC_NUM; i++) {
		priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
		if (!priv->rx_buf[i])
			return -ENOMEM;
	}

	return 0;
}
static void hip04_free_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		if (priv->rx_buf[i])
			skb_free_frag(priv->rx_buf[i]);

	for (i = 0; i < TX_DESC_NUM; i++)
		if (priv->tx_skb[i])
			dev_kfree_skb_any(priv->tx_skb[i]);

	dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
			  priv->tx_desc, priv->tx_desc_dma);
}
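/*
 * A rough sketch of the DT wiring the probe below expects (addresses and
 * interrupt numbers are illustrative, not taken from a binding document):
 *
 *	ppe: ppe@28c0000 {
 *		compatible = "hisilicon,hip04-ppe", "syscon";
 *		reg = <0x28c0000 0x10000>;
 *	};
 *
 *	fe: ethernet@28b0000 {
 *		compatible = "hisilicon,hip04-mac";
 *		reg = <0x28b0000 0x10000>;
 *		interrupts = <0 413 4>;
 *		phy-mode = "mii";
 *		port-handle = <&ppe 31 0 0>;	/* port, chan, group */
 *	};
 */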
static int hip04_mac_probe(struct platform_device *pdev)
{
	struct device *d = &pdev->dev;
	struct device_node *node = d->of_node;
	struct of_phandle_args arg;
	struct net_device *ndev;
	struct hip04_priv *priv;
	int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hip04_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = d;
	priv->ndev = ndev;
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

#if defined(CONFIG_HI13X1_GMAC)
	priv->sysctrl_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->sysctrl_base)) {
		ret = PTR_ERR(priv->sysctrl_base);
		goto init_fail;
	}
#endif

	ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
	if (ret < 0) {
		dev_warn(d, "no port-handle\n");
		goto init_fail;
	}

	priv->port = arg.args[0];
	priv->chan = arg.args[1] * RX_DESC_NUM;
	priv->group = arg.args[2];

	hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* BQL will try to keep the TX queue as short as possible, but it can't
	 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
	 * but also long enough to gather up enough frames to ensure we don't
	 * get more interrupts than necessary.
	 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
	 */
	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
	priv->tx_coalesce_usecs = 200;
	priv->tx_coalesce_timer.function = tx_done;

	priv->map = syscon_node_to_regmap(arg.np);
	if (IS_ERR(priv->map)) {
		dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
		ret = PTR_ERR(priv->map);
		goto init_fail;
	}

	ret = of_get_phy_mode(node, &priv->phy_mode);
	if (ret) {
		dev_warn(d, "not find phy-mode\n");
		goto init_fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		goto init_fail;
	}

	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (priv->phy_node) {
		priv->phy = of_phy_connect(ndev, priv->phy_node,
					   &hip04_adjust_link,
					   0, priv->phy_mode);
		if (!priv->phy) {
			ret = -EPROBE_DEFER;
			goto init_fail;
		}
	}

	INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

	ndev->netdev_ops = &hip04_netdev_ops;
	ndev->ethtool_ops = &hip04_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);

	hip04_reset_dreq(priv);
	hip04_reset_ppe(priv);
	if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
		hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

	hip04_config_fifo(priv);
	eth_random_addr(ndev->dev_addr);
	hip04_update_mac_address(ndev);

	ret = hip04_alloc_ring(ndev, d);
	if (ret) {
		netdev_err(ndev, "alloc ring fail\n");
		goto alloc_fail;
	}

	ret = register_netdev(ndev);
	if (ret)
		goto alloc_fail;

	return 0;

alloc_fail:
	hip04_free_ring(ndev, d);
init_fail:
	of_node_put(priv->phy_node);
	free_netdev(ndev);
	return ret;
}
static int hip04_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hip04_priv *priv = netdev_priv(ndev);
	struct device *d = &pdev->dev;

	if (priv->phy)
		phy_disconnect(priv->phy);

	hip04_free_ring(ndev, d);
	unregister_netdev(ndev);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);

	return 0;
}
static const struct of_device_id hip04_mac_match[] = {
	{ .compatible = "hisilicon,hip04-mac" },
	{ }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);
static struct platform_driver hip04_mac_driver = {
	.probe	= hip04_mac_probe,
	.remove	= hip04_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= hip04_mac_match,
	},
};

module_platform_driver(hip04_mac_driver);
MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");