/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#define PPE_CFG_RX_ADDR			0x100
#define PPE_CFG_POOL_GRP		0x300
#define PPE_CFG_RX_BUF_SIZE		0x400
#define PPE_CFG_RX_FIFO_SIZE		0x500
#define PPE_CURR_BUF_CNT		0xa200

#define GE_DUPLEX_TYPE			0x08
#define GE_MAX_FRM_SIZE_REG		0x3c
#define GE_PORT_MODE			0x40
#define GE_PORT_EN			0x44
#define GE_SHORT_RUNTS_THR_REG		0x50
#define GE_TX_LOCAL_PAGE_REG		0x5c
#define GE_TRANSMIT_CONTROL_REG		0x60
#define GE_CF_CRC_STRIP_REG		0x1b0
#define GE_MODE_CHANGE_REG		0x1b4
#define GE_RECV_CONTROL_REG		0x1e0
#define GE_STATION_MAC_ADDRESS		0x210
#define PPE_CFG_CPU_ADD_ADDR		0x580
#define PPE_CFG_MAX_FRAME_LEN_REG	0x408
#define PPE_CFG_BUS_CTRL_REG		0x424
#define PPE_CFG_RX_CTRL_REG		0x428
#define PPE_CFG_RX_PKT_MODE_REG		0x438
#define PPE_CFG_QOS_VMID_GEN		0x500
#define PPE_CFG_RX_PKT_INT		0x538
#define PPE_INTEN			0x600
#define PPE_INTSTS			0x608
#define PPE_RINT			0x604
#define PPE_CFG_STS_MODE		0x700
#define PPE_HIS_RX_PKT_CNT		0x804

#define RCV_INT				BIT(10)
#define RCV_NOBUF			BIT(8)
#define RCV_DROP			BIT(7)
#define TX_DROP				BIT(6)
#define DEF_INT_ERR			(RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK			(RCV_INT | DEF_INT_ERR)

/* TX descriptor config */
#define TX_FREE_MEM			BIT(0)
#define TX_READ_ALLOC_L3		BIT(1)
#define TX_FINISH_CACHE_INV		BIT(2)
#define TX_CLEAR_WB			BIT(4)
#define TX_L3_CHECKSUM			BIT(5)
#define TX_LOOP_BACK			BIT(11)

#define RX_PKT_DROP			BIT(0)
#define RX_L2_ERR			BIT(1)
#define RX_PKT_ERR			(RX_PKT_DROP | RX_L2_ERR)

#define SGMII_SPEED_1000		0x08
#define SGMII_SPEED_100			0x07
#define SGMII_SPEED_10			0x06
#define MII_SPEED_100			0x01
#define MII_SPEED_10			0x00

#define GE_DUPLEX_FULL			BIT(0)
#define GE_DUPLEX_HALF			0x00
#define GE_MODE_CHANGE_EN		BIT(0)

#define GE_TX_AUTO_NEG			BIT(5)
#define GE_TX_ADD_CRC			BIT(6)
#define GE_TX_SHORT_PAD_THROUGH		BIT(7)

#define GE_RX_STRIP_CRC			BIT(0)
#define GE_RX_STRIP_PAD			BIT(3)
#define GE_RX_PAD_EN			BIT(4)

#define GE_AUTO_NEG_CTL			BIT(0)

#define GE_RX_INT_THRESHOLD		BIT(6)
#define GE_RX_TIMEOUT			0x04

#define GE_RX_PORT_EN			BIT(1)
#define GE_TX_PORT_EN			BIT(2)

#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(12)

#define PPE_CFG_RX_PKT_ALIGN		BIT(18)
#define PPE_CFG_QOS_VMID_MODE		BIT(14)
#define PPE_CFG_QOS_VMID_GRP_SHIFT	8

#define PPE_CFG_RX_FIFO_FSFU		BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT		16
#define PPE_CFG_RX_START_SHIFT		0
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	11

#define PPE_CFG_BUS_LOCAL_REL		BIT(14)
#define PPE_CFG_BUS_BIG_ENDIEN		BIT(0)

#define RX_DESC_NUM			128
#define TX_DESC_NUM			256
#define TX_NEXT(N)			(((N) + 1) & (TX_DESC_NUM - 1))
#define RX_NEXT(N)			(((N) + 1) & (RX_DESC_NUM - 1))

#define GMAC_PPE_RX_PKT_MAX_LEN		379
#define GMAC_MAX_PKT_LEN		1516
#define GMAC_MIN_PKT_LEN		31
#define RX_BUF_SIZE			1600
#define RESET_TIMEOUT			1000
#define TX_TIMEOUT			(6 * HZ)

#define DRV_NAME			"hip04-ether"
#define DRV_VERSION			"v1.0"

#define HIP04_MAX_TX_COALESCE_USECS	200
#define HIP04_MIN_TX_COALESCE_USECS	100
#define HIP04_MAX_TX_COALESCE_FRAMES	200
#define HIP04_MIN_TX_COALESCE_FRAMES	100

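/*
 * The hardware descriptor layouts are not included in this excerpt.
 * The definitions below are a minimal sketch reconstructed from the
 * accesses made elsewhere in this file (send_addr/send_size/cfg/wb_addr
 * on TX; pkt_len/pkt_err on RX); the reserved padding fields and the
 * 64-byte alignment are assumptions, not taken from the original source.
 */
struct tx_desc {
	u32 send_addr;
	u32 send_size;
	u32 reserved;		/* assumed padding */
	u32 cfg;
	u32 wb_addr;
} __aligned(64);		/* alignment assumed */

struct rx_desc {
	u16 reserved_16;	/* assumed padding */
	u16 pkt_len;
	u32 reserve1[3];	/* assumed padding */
	u32 pkt_err;
};
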
struct hip04_priv {
	void __iomem *base;
	int phy_mode;
	int chan;
	unsigned int port;
	unsigned int speed;
	unsigned int duplex;
	unsigned int reg_inten;

	struct napi_struct napi;
	struct net_device *ndev;

	struct tx_desc *tx_desc;
	dma_addr_t tx_desc_dma;
	struct sk_buff *tx_skb[TX_DESC_NUM];
	dma_addr_t tx_phys[TX_DESC_NUM];
	unsigned int tx_head;

	int tx_coalesce_frames;
	int tx_coalesce_usecs;
	struct hrtimer tx_coalesce_timer;

	unsigned char *rx_buf[RX_DESC_NUM];
	dma_addr_t rx_phys[RX_DESC_NUM];
	unsigned int rx_head;
	unsigned int rx_buf_size;

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;
	struct work_struct tx_timeout_task;

	/* written only by tx cleanup */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};

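/*
 * TX ring accounting: hip04_mac_start_xmit() is the only writer of
 * tx_head and hip04_tx_reclaim() the only writer of tx_tail, so the
 * occupancy can be computed without a lock. The queue is stopped once
 * tx_count() reaches TX_DESC_NUM - 1, leaving one slot free so that
 * head == tail always means "empty".
 */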
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % (TX_DESC_NUM - 1);
}

static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val = 0;

	priv->speed = speed;
	priv->duplex = duplex;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_SGMII:
		if (speed == SPEED_1000)
			val = SGMII_SPEED_1000;
		else if (speed == SPEED_100)
			val = SGMII_SPEED_100;
		else
			val = SGMII_SPEED_10;
		break;
	case PHY_INTERFACE_MODE_MII:
		if (speed == SPEED_100)
			val = MII_SPEED_100;
		else
			val = MII_SPEED_10;
		break;
	default:
		netdev_warn(ndev, "unsupported PHY mode\n");
		break;
	}

	writel_relaxed(val, priv->base + GE_PORT_MODE);

	val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
	writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);

	val = GE_MODE_CHANGE_EN;
	writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}

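/*
 * Drain the PPE bookkeeping for this port before (re)configuring it:
 * keep reading the current buffer count, popping stale RX addresses,
 * until the count reaches zero or RESET_TIMEOUT iterations pass.
 */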
static void hip04_reset_ppe(struct hip04_priv *priv)
{
	u32 val, tmp, timeout = 0;

	do {
		regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
		regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
		if (timeout++ > RESET_TIMEOUT)
			break;
	} while (val & 0xfff);
}

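/*
 * One-time datapath setup: clear-on-read packet counters, buffer pool
 * group, QoS/VMID mapping, RX ring depth and start channel, payload
 * alignment, bus endianness, the frame size limits, and the GMAC TX/RX
 * options (CRC, padding, autoneg).
 */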
static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	val = BIT(priv->port);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	val = RX_BUF_SIZE;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
}

static void hip04_mac_enable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* enable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
	writel_relaxed(val, priv->base + GE_PORT_EN);

	/* clear rx int */
	val = RCV_INT;
	writel_relaxed(val, priv->base + PPE_RINT);

	/* config recv int */
	val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);

	/* enable interrupt */
	priv->reg_inten = DEF_INT_MASK;
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}

static void hip04_mac_disable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* disable int */
	priv->reg_inten &= ~(DEF_INT_MASK);
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);

	/* disable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
	writel_relaxed(val, priv->base + GE_PORT_EN);
}

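/*
 * Descriptor doorbells. These deliberately use writel() rather than
 * writel_relaxed(): the implied write barrier orders the preceding
 * descriptor and buffer writes before the device is told about them.
 */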
static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
}

static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
}

static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}

static void hip04_update_mac_address(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
		       priv->base + GE_STATION_MAC_ADDRESS);
	writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
			(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
		       priv->base + GE_STATION_MAC_ADDRESS + 4);
}

static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	eth_mac_addr(ndev, addr);
	hip04_update_mac_address(ndev);
	return 0;
}

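/*
 * Reclaim completed TX descriptors. The write-back (TX_CLEAR_WB)
 * zeroes send_addr once the hardware is done with a descriptor; with
 * @force (used from ndo_stop) descriptors are reclaimed regardless.
 * The smp_wmb()/READ_ONCE() on tx_head and tx_tail pair with the
 * barriers in hip04_mac_start_xmit().
 */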
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned int tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	smp_rmb();
	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}

static void hip04_start_tx_timer(struct hip04_priv *priv)
{
	unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;

	/* allow timer to fire after half the time at the earliest */
	hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
			       ns, HRTIMER_MODE_REL);
}

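/*
 * TX path: map the skb, fill in the next descriptor (including its
 * own write-back address), ring the doorbell, and account the bytes
 * with BQL. Reclaim is deferred to NAPI: either the ring crosses
 * tx_coalesce_frames and NAPI is scheduled directly, or the coalesce
 * hrtimer fires and schedules it.
 */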
static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	smp_rmb();
	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, phys)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;
	desc->send_addr = cpu_to_be32(phys);
	desc->send_size = cpu_to_be32(skb->len);
	desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = cpu_to_be32(phys);
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	priv->tx_head = TX_NEXT(tx_head);
	count++;
	netdev_sent_queue(ndev, skb->len);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}

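/*
 * NAPI poll: the PPE returns buffers with a struct rx_desc header in
 * front of the frame; a zero pkt_len marks the end of the batch.
 * Every consumed buffer is immediately replaced with a freshly mapped
 * page fragment so the RX ring stays full.
 */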
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	unsigned int cnt = hip04_recv_cnt(priv);
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	while (cnt && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			goto refill;
		}

		dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu(desc->pkt_len);
		err = be32_to_cpu(desc->pkt_err);

		if (len == 0) {
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(&ndev->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget)
			goto done;

		if (--cnt == 0)
			cnt = hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete_done(napi, rx);
done:
	/* clean up tx descriptors and start a new timer if necessary */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}

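/*
 * Interrupt top half: ack all raw status bits, log and count error
 * drops, then defer RX processing to NAPI with the RX interrupt
 * masked and the TX coalesce timer cancelled.
 */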
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	if (!ists)
		return IRQ_NONE;

	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP)) {
			stats->rx_errors++;
			stats->rx_dropped++;
			netdev_err(ndev, "rx drop\n");
		}
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
	struct hip04_priv *priv;

	priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

	if (napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		__napi_schedule(&priv->napi);
	}

	return HRTIMER_NORESTART;
}

static void hip04_adjust_link(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct phy_device *phy = priv->phy;

	if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
		hip04_config_port(ndev, phy->speed, phy->duplex);
		phy_print_status(phy);
	}
}

static int hip04_mac_open(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->rx_head = 0;
	priv->tx_head = 0;
	priv->tx_tail = 0;
	hip04_reset_ppe(priv);

	for (i = 0; i < RX_DESC_NUM; i++) {
		dma_addr_t phys;

		phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			return -EIO;

		priv->rx_phys[i] = phys;
		hip04_set_recv_desc(priv, phys);
	}

	if (priv->phy)
		phy_start(priv->phy);

	netdev_reset_queue(ndev);
	netif_start_queue(ndev);
	hip04_mac_enable(ndev);
	napi_enable(&priv->napi);

	return 0;
}

static int hip04_mac_stop(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	hip04_mac_disable(ndev);
	hip04_tx_reclaim(ndev, true);
	hip04_reset_ppe(priv);

	if (priv->phy)
		phy_stop(priv->phy);

	for (i = 0; i < RX_DESC_NUM; i++) {
		if (priv->rx_phys[i]) {
			dma_unmap_single(&ndev->dev, priv->rx_phys[i],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			priv->rx_phys[i] = 0;
		}
	}

	return 0;
}

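/*
 * TX watchdog: restarting the MAC sleeps (napi_disable, DMA unmap and
 * remap), so the stop/open cycle is deferred to a workqueue rather
 * than done in ndo_tx_timeout context.
 */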
static void hip04_timeout(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}

static void hip04_tx_timeout_task(struct work_struct *work)
{
	struct hip04_priv *priv;

	priv = container_of(work, struct hip04_priv, tx_timeout_task);
	hip04_mac_stop(priv->ndev);
	hip04_mac_open(priv->ndev);
}

static int hip04_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;

	return 0;
}

static int hip04_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	/* Check not supported parameters */
	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
	     ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
	    (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
	     ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
		return -EINVAL;

	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;

	return 0;
}

static void hip04_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}

static const struct ethtool_ops hip04_ethtool_ops = {
	.get_coalesce		= hip04_get_coalesce,
	.set_coalesce		= hip04_set_coalesce,
	.get_drvinfo		= hip04_get_drvinfo,
};

static const struct net_device_ops hip04_netdev_ops = {
	.ndo_open		= hip04_mac_open,
	.ndo_stop		= hip04_mac_stop,
	.ndo_start_xmit		= hip04_mac_start_xmit,
	.ndo_set_mac_address	= hip04_set_mac_address,
	.ndo_tx_timeout		= hip04_timeout,
	.ndo_validate_addr	= eth_validate_addr,
};

static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->tx_desc = dma_alloc_coherent(d,
					   TX_DESC_NUM * sizeof(struct tx_desc),
					   &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_desc)
		return -ENOMEM;

	priv->rx_buf_size = RX_BUF_SIZE +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	for (i = 0; i < RX_DESC_NUM; i++) {
		priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
		if (!priv->rx_buf[i])
			return -ENOMEM;
	}

	return 0;
}

static void hip04_free_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		if (priv->rx_buf[i])
			skb_free_frag(priv->rx_buf[i]);

	for (i = 0; i < TX_DESC_NUM; i++)
		if (priv->tx_skb[i])
			dev_kfree_skb_any(priv->tx_skb[i]);

	dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
			  priv->tx_desc, priv->tx_desc_dma);
}

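/*
 * The "port-handle" phandle carries two fixed args, used below as the
 * PPE port number and a channel group index; the RX ring for this
 * port starts at channel group * RX_DESC_NUM. (This reading of the
 * args follows the usage in probe; the binding document itself is not
 * part of this excerpt.)
 */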
static int hip04_mac_probe(struct platform_device *pdev)
{
	struct device *d = &pdev->dev;
	struct device_node *node = d->of_node;
	struct of_phandle_args arg;
	struct net_device *ndev;
	struct hip04_priv *priv;
	struct resource *res;
	int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hip04_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(d, res);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

	ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
	if (ret < 0) {
		dev_warn(d, "no port-handle\n");
		goto init_fail;
	}

	priv->port = arg.args[0];
	priv->chan = arg.args[1] * RX_DESC_NUM;

	hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* BQL will try to keep the TX queue as short as possible, but it can't
	 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
	 * but also long enough to gather up enough frames to ensure we don't
	 * get more interrupts than necessary.
	 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
	 */
	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
	priv->tx_coalesce_usecs = 200;
	priv->tx_coalesce_timer.function = tx_done;

	priv->map = syscon_node_to_regmap(arg.np);
	if (IS_ERR(priv->map)) {
		dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
		ret = PTR_ERR(priv->map);
		goto init_fail;
	}

	priv->phy_mode = of_get_phy_mode(node);
	if (priv->phy_mode < 0) {
		dev_warn(d, "failed to find phy-mode\n");
		ret = -EINVAL;
		goto init_fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		goto init_fail;
	}

	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (priv->phy_node) {
		priv->phy = of_phy_connect(ndev, priv->phy_node,
					   &hip04_adjust_link,
					   0, priv->phy_mode);
		if (!priv->phy) {
			ret = -EPROBE_DEFER;
			goto init_fail;
		}
	}

	INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

	ndev->netdev_ops = &hip04_netdev_ops;
	ndev->ethtool_ops = &hip04_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;
	netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);

	hip04_reset_ppe(priv);
	if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
		hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

	hip04_config_fifo(priv);
	random_ether_addr(ndev->dev_addr);
	hip04_update_mac_address(ndev);

	ret = hip04_alloc_ring(ndev, d);
	if (ret) {
		netdev_err(ndev, "alloc ring failed\n");
		goto alloc_fail;
	}

	ret = register_netdev(ndev);
	if (ret)
		goto alloc_fail;

	return 0;

alloc_fail:
	hip04_free_ring(ndev, d);
init_fail:
	of_node_put(priv->phy_node);
	free_netdev(ndev);
	return ret;
}

static int hip04_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hip04_priv *priv = netdev_priv(ndev);
	struct device *d = &pdev->dev;

	if (priv->phy)
		phy_disconnect(priv->phy);

	hip04_free_ring(ndev, d);
	unregister_netdev(ndev);
	free_irq(ndev->irq, ndev);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id hip04_mac_match[] = {
	{ .compatible = "hisilicon,hip04-mac" },
	{ }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);

static struct platform_driver hip04_mac_driver = {
	.probe	= hip04_mac_probe,
	.remove	= hip04_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= hip04_mac_match,
	},
};

module_platform_driver(hip04_mac_driver);

MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");