/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#define PPE_CFG_RX_ADDR			0x100
#define PPE_CFG_POOL_GRP		0x300
#define PPE_CFG_RX_BUF_SIZE		0x400
#define PPE_CFG_RX_FIFO_SIZE		0x500
#define PPE_CURR_BUF_CNT		0xa200

#define GE_DUPLEX_TYPE			0x08
#define GE_MAX_FRM_SIZE_REG		0x3c
#define GE_PORT_MODE			0x40
#define GE_PORT_EN			0x44
#define GE_SHORT_RUNTS_THR_REG		0x50
#define GE_TX_LOCAL_PAGE_REG		0x5c
#define GE_TRANSMIT_CONTROL_REG		0x60
#define GE_CF_CRC_STRIP_REG		0x1b0
#define GE_MODE_CHANGE_REG		0x1b4
#define GE_RECV_CONTROL_REG		0x1e0
#define GE_STATION_MAC_ADDRESS		0x210
#define PPE_CFG_CPU_ADD_ADDR		0x580
#define PPE_CFG_MAX_FRAME_LEN_REG	0x408
#define PPE_CFG_BUS_CTRL_REG		0x424
#define PPE_CFG_RX_CTRL_REG		0x428
#define PPE_CFG_RX_PKT_MODE_REG		0x438
#define PPE_CFG_QOS_VMID_GEN		0x500
#define PPE_CFG_RX_PKT_INT		0x538
#define PPE_INTEN			0x600
#define PPE_INTSTS			0x608
#define PPE_RINT			0x604
#define PPE_CFG_STS_MODE		0x700
#define PPE_HIS_RX_PKT_CNT		0x804
/* REG_INTERRUPT */
#define RCV_INT				BIT(10)
#define RCV_NOBUF			BIT(8)
#define RCV_DROP			BIT(7)
#define TX_DROP				BIT(6)
#define DEF_INT_ERR			(RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK			(RCV_INT | DEF_INT_ERR)
/* TX descriptor config */
#define TX_FREE_MEM			BIT(0)
#define TX_READ_ALLOC_L3		BIT(1)
#define TX_FINISH_CACHE_INV		BIT(2)
#define TX_CLEAR_WB			BIT(4)
#define TX_L3_CHECKSUM			BIT(5)
#define TX_LOOP_BACK			BIT(11)
/* RX error */
#define RX_PKT_DROP			BIT(0)
#define RX_L2_ERR			BIT(1)
#define RX_PKT_ERR			(RX_PKT_DROP | RX_L2_ERR)
#define SGMII_SPEED_1000		0x08
#define SGMII_SPEED_100			0x07
#define SGMII_SPEED_10			0x06
#define MII_SPEED_100			0x01
#define MII_SPEED_10			0x00

#define GE_DUPLEX_FULL			BIT(0)
#define GE_DUPLEX_HALF			0x00
#define GE_MODE_CHANGE_EN		BIT(0)

#define GE_TX_AUTO_NEG			BIT(5)
#define GE_TX_ADD_CRC			BIT(6)
#define GE_TX_SHORT_PAD_THROUGH		BIT(7)

#define GE_RX_STRIP_CRC			BIT(0)
#define GE_RX_STRIP_PAD			BIT(3)
#define GE_RX_PAD_EN			BIT(4)

#define GE_AUTO_NEG_CTL			BIT(0)

#define GE_RX_INT_THRESHOLD		BIT(6)
#define GE_RX_TIMEOUT			0x04

#define GE_RX_PORT_EN			BIT(1)
#define GE_TX_PORT_EN			BIT(2)

#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(12)

#define PPE_CFG_RX_PKT_ALIGN		BIT(18)
#define PPE_CFG_QOS_VMID_MODE		BIT(14)
#define PPE_CFG_QOS_VMID_GRP_SHIFT	8

#define PPE_CFG_RX_FIFO_FSFU		BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT		16
#define PPE_CFG_RX_START_SHIFT		0
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	11

#define PPE_CFG_BUS_LOCAL_REL		BIT(14)
#define PPE_CFG_BUS_BIG_ENDIEN		BIT(0)

#define RX_DESC_NUM			128
#define TX_DESC_NUM			256
#define TX_NEXT(N)			(((N) + 1) & (TX_DESC_NUM-1))
#define RX_NEXT(N)			(((N) + 1) & (RX_DESC_NUM-1))

#define GMAC_PPE_RX_PKT_MAX_LEN		379
#define GMAC_MAX_PKT_LEN		1516
#define GMAC_MIN_PKT_LEN		31
#define RX_BUF_SIZE			1600
#define RESET_TIMEOUT			1000
#define TX_TIMEOUT			(6 * HZ)

#define DRV_NAME			"hip04-ether"
#define DRV_VERSION			"v1.0"

#define HIP04_MAX_TX_COALESCE_USECS	200
#define HIP04_MIN_TX_COALESCE_USECS	100
#define HIP04_MAX_TX_COALESCE_FRAMES	200
#define HIP04_MIN_TX_COALESCE_FRAMES	100
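
/*
 * DMA descriptor layouts. Only the fields this driver actually touches
 * are certain (send_addr/send_size/cfg/wb_addr on TX, pkt_len and
 * pkt_err on RX); the reserved padding below is a sketch inferred from
 * how the descriptors are accessed elsewhere in this file. Both are
 * big-endian in memory, hence the cpu_to_be32()/be16_to_cpu()
 * conversions at the use sites.
 */
struct tx_desc {
	u32 send_addr;
	u32 send_size;
	u32 next_addr;
	u32 cfg;
	u32 wb_addr;
} __aligned(64);

struct rx_desc {
	u16 reserved_16;
	u16 pkt_len;
	u32 reserve1[3];
	u32 pkt_err;
	u32 reserve2[4];
};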

struct hip04_priv {
	void __iomem *base;
	int phy_mode;
	int chan;
	unsigned int port;
	unsigned int speed;
	unsigned int duplex;
	unsigned int reg_inten;

	struct napi_struct napi;
	struct net_device *ndev;

	struct tx_desc *tx_desc;
	dma_addr_t tx_desc_dma;
	struct sk_buff *tx_skb[TX_DESC_NUM];
	dma_addr_t tx_phys[TX_DESC_NUM];
	unsigned int tx_head;

	int tx_coalesce_frames;
	int tx_coalesce_usecs;
	struct hrtimer tx_coalesce_timer;

	unsigned char *rx_buf[RX_DESC_NUM];
	dma_addr_t rx_phys[RX_DESC_NUM];
	unsigned int rx_head;
	unsigned int rx_buf_size;

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;
	struct work_struct tx_timeout_task;

	/* written only by tx cleanup */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};
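
/*
 * Number of descriptors currently in flight between head and tail.
 * The ring is never allowed to fill completely (xmit stops the queue
 * at TX_DESC_NUM - 1), so one slot always stays free to tell a full
 * ring apart from an empty one.
 */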
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % (TX_DESC_NUM - 1);
}
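
/*
 * Program MAC speed/duplex. The speed encoding written to GE_PORT_MODE
 * depends on whether the port runs in SGMII or MII mode; the new
 * setting is latched by writing GE_MODE_CHANGE_EN afterwards.
 */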
static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	priv->speed = speed;
	priv->duplex = duplex;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_SGMII:
		if (speed == SPEED_1000)
			val = SGMII_SPEED_1000;
		else if (speed == SPEED_100)
			val = SGMII_SPEED_100;
		else
			val = SGMII_SPEED_10;
		break;
	case PHY_INTERFACE_MODE_MII:
		if (speed == SPEED_100)
			val = MII_SPEED_100;
		else
			val = MII_SPEED_10;
		break;
	default:
		netdev_warn(ndev, "not supported mode\n");
		val = MII_SPEED_10;
		break;
	}
	writel_relaxed(val, priv->base + GE_PORT_MODE);

	val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
	writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);

	val = GE_MODE_CHANGE_EN;
	writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}
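
/*
 * Wait for the PPE to drain its buffer FIFO for this port, polling the
 * current buffer count until it reaches zero or RESET_TIMEOUT polls
 * have elapsed.
 */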
static void hip04_reset_ppe(struct hip04_priv *priv)
{
	u32 val, tmp, timeout = 0;

	do {
		regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
		regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
		if (timeout++ > RESET_TIMEOUT)
			break;
	} while (val & 0xfff);
}
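
/*
 * One-time datapath setup: route the RX pool/queue for this port,
 * program buffer and FIFO geometry, packet alignment, bus endianness,
 * frame length limits, and the MAC TX/RX control bits.
 */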
static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	val = BIT(priv->port);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	val = RX_BUF_SIZE;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
}
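
/*
 * Enable the MAC datapath: TX/RX port enables, RX interrupt coalescing
 * thresholds, and the interrupt mask.
 */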
static void hip04_mac_enable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* enable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
	writel_relaxed(val, priv->base + GE_PORT_EN);

	/* clear rx int */
	val = RCV_INT;
	writel_relaxed(val, priv->base + PPE_RINT);

	/* config recv int */
	val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);

	/* enable interrupt */
	priv->reg_inten = DEF_INT_MASK;
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}
static void hip04_mac_disable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* disable int */
	priv->reg_inten &= ~(DEF_INT_MASK);
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);

	/* disable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
	writel_relaxed(val, priv->base + GE_PORT_EN);
}
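
/*
 * Hand a descriptor address to the hardware: TX kicks go through an
 * MMIO doorbell, RX refills go through the shared PPE regmap.
 */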
static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
}
static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
}
static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}
static void hip04_update_mac_address(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
		       priv->base + GE_STATION_MAC_ADDRESS);
	writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
			(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
		       priv->base + GE_STATION_MAC_ADDRESS + 4);
}
static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	eth_mac_addr(ndev, addr);
	hip04_update_mac_address(ndev);
	return 0;
}
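
/*
 * Reclaim completed TX descriptors up to tx_head. The hardware clears
 * a descriptor's send_addr via the write-back address once it is done
 * with it; with @force set the descriptor is reclaimed regardless
 * (used on shutdown). Returns the number of descriptors still
 * outstanding.
 */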
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned int tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	smp_rmb();
	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}
static void hip04_start_tx_timer(struct hip04_priv *priv)
{
	unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;

	/* allow timer to fire after half the time at the earliest */
	hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
			       ns, HRTIMER_MODE_REL);
}
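
/*
 * Queue a packet for transmission: map the skb, fill the next TX
 * descriptor (with write-back enabled so completion can be detected),
 * kick the hardware, then either schedule NAPI for cleanup when the
 * ring is getting full or arm the coalesce timer.
 */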
static netdev_tx_t
hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	smp_rmb();
	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, phys)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;
	desc->send_addr = cpu_to_be32(phys);
	desc->send_size = cpu_to_be32(skb->len);
	desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = cpu_to_be32(phys);
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	priv->tx_head = TX_NEXT(tx_head);
	count++;
	netdev_sent_queue(ndev, skb->len);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}
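
/*
 * NAPI poll: consume packets the PPE has written into the RX frags,
 * hand them to the stack via GRO, refill each consumed slot, and only
 * re-enable the RX interrupt once the budget was not exhausted. TX
 * reclaim is piggy-backed here as well.
 */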
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	unsigned int cnt = hip04_recv_cnt(priv);
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	while (cnt && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			goto refill;
		}

		dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu(desc->pkt_len);
		err = be32_to_cpu(desc->pkt_err);

		if (len == 0) {
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(&ndev->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget)
			goto done;

		if (--cnt == 0)
			cnt = hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete_done(napi, rx);
done:
	/* clean up tx descriptors and start a new timer if necessary */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}
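
/*
 * Top-half interrupt handler: acknowledge everything, count error
 * events, and defer RX processing to NAPI with the RX interrupt
 * masked.
 */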
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	if (!ists)
		return IRQ_NONE;

	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP)) {
			stats->rx_errors++;
			stats->rx_dropped++;
			netdev_err(ndev, "rx drop\n");
		}
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
	struct hip04_priv *priv;

	priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

	if (napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		__napi_schedule(&priv->napi);
	}

	return HRTIMER_NORESTART;
}
static void hip04_adjust_link(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct phy_device *phy = priv->phy;

	if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
		hip04_config_port(ndev, phy->speed, phy->duplex);
		phy_print_status(phy);
	}
}
static int hip04_mac_open(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->rx_head = 0;
	priv->tx_head = 0;
	priv->tx_tail = 0;
	hip04_reset_ppe(priv);

	for (i = 0; i < RX_DESC_NUM; i++) {
		dma_addr_t phys;

		phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			return -EIO;

		priv->rx_phys[i] = phys;
		hip04_set_recv_desc(priv, phys);
	}

	if (priv->phy)
		phy_start(priv->phy);

	netdev_reset_queue(ndev);
	netif_start_queue(ndev);
	hip04_mac_enable(ndev);
	napi_enable(&priv->napi);

	return 0;
}
static int hip04_mac_stop(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	hip04_mac_disable(ndev);
	hip04_tx_reclaim(ndev, true);
	hip04_reset_ppe(priv);

	if (priv->phy)
		phy_stop(priv->phy);

	for (i = 0; i < RX_DESC_NUM; i++) {
		if (priv->rx_phys[i]) {
			dma_unmap_single(&ndev->dev, priv->rx_phys[i],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			priv->rx_phys[i] = 0;
		}
	}

	return 0;
}
static void hip04_timeout(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
static void hip04_tx_timeout_task(struct work_struct *work)
{
	struct hip04_priv *priv;

	priv = container_of(work, struct hip04_priv, tx_timeout_task);
	hip04_mac_stop(priv->ndev);
	hip04_mac_open(priv->ndev);
}
static int hip04_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;

	return 0;
}
static int hip04_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	/* Check not supported parameters */
	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
	     ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
	    (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
	     ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
		return -EINVAL;

	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;

	return 0;
}
static void hip04_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static const struct ethtool_ops hip04_ethtool_ops = {
	.get_coalesce		= hip04_get_coalesce,
	.set_coalesce		= hip04_set_coalesce,
	.get_drvinfo		= hip04_get_drvinfo,
};
static const struct net_device_ops hip04_netdev_ops = {
	.ndo_open		= hip04_mac_open,
	.ndo_stop		= hip04_mac_stop,
	.ndo_start_xmit		= hip04_mac_start_xmit,
	.ndo_set_mac_address	= hip04_set_mac_address,
	.ndo_tx_timeout		= hip04_timeout,
	.ndo_validate_addr	= eth_validate_addr,
};
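
/*
 * Ring setup: one coherent block for the TX descriptors, and page-frag
 * buffers (sized so build_skb() can append its shared_info) for RX.
 */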
static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->tx_desc = dma_alloc_coherent(d,
					   TX_DESC_NUM * sizeof(struct tx_desc),
					   &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_desc)
		return -ENOMEM;

	priv->rx_buf_size = RX_BUF_SIZE +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	for (i = 0; i < RX_DESC_NUM; i++) {
		priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
		if (!priv->rx_buf[i])
			return -ENOMEM;
	}

	return 0;
}
static void hip04_free_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		if (priv->rx_buf[i])
			skb_free_frag(priv->rx_buf[i]);

	for (i = 0; i < TX_DESC_NUM; i++)
		if (priv->tx_skb[i])
			dev_kfree_skb_any(priv->tx_skb[i]);

	dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
			  priv->tx_desc, priv->tx_desc_dma);
}
static int hip04_mac_probe(struct platform_device *pdev)
{
	struct device *d = &pdev->dev;
	struct device_node *node = d->of_node;
	struct of_phandle_args arg;
	struct net_device *ndev;
	struct hip04_priv *priv;
	struct resource *res;
	int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hip04_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(d, res);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

	ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
	if (ret < 0) {
		dev_warn(d, "no port-handle\n");
		goto init_fail;
	}

	priv->port = arg.args[0];
	priv->chan = arg.args[1] * RX_DESC_NUM;

	hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* BQL will try to keep the TX queue as short as possible, but it can't
	 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
	 * but also long enough to gather up enough frames to ensure we don't
	 * get more interrupts than necessary.
	 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
	 */
	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
	priv->tx_coalesce_usecs = 200;
	priv->tx_coalesce_timer.function = tx_done;

	priv->map = syscon_node_to_regmap(arg.np);
	if (IS_ERR(priv->map)) {
		dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
		ret = PTR_ERR(priv->map);
		goto init_fail;
	}

	priv->phy_mode = of_get_phy_mode(node);
	if (priv->phy_mode < 0) {
		dev_warn(d, "not find phy-mode\n");
		ret = -EINVAL;
		goto init_fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		goto init_fail;
	}

	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (priv->phy_node) {
		priv->phy = of_phy_connect(ndev, priv->phy_node,
					   &hip04_adjust_link,
					   0, priv->phy_mode);
		if (!priv->phy) {
			ret = -EPROBE_DEFER;
			goto init_fail;
		}
	}

	INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

	ndev->netdev_ops = &hip04_netdev_ops;
	ndev->ethtool_ops = &hip04_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;
	netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);

	hip04_reset_ppe(priv);
	if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
		hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

	hip04_config_fifo(priv);
	eth_random_addr(ndev->dev_addr);
	hip04_update_mac_address(ndev);

	ret = hip04_alloc_ring(ndev, d);
	if (ret) {
		netdev_err(ndev, "alloc ring fail\n");
		goto alloc_fail;
	}

	ret = register_netdev(ndev);
	if (ret)
		goto alloc_fail;

	return 0;

alloc_fail:
	hip04_free_ring(ndev, d);
init_fail:
	of_node_put(priv->phy_node);
	free_netdev(ndev);
	return ret;
}
static int hip04_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hip04_priv *priv = netdev_priv(ndev);
	struct device *d = &pdev->dev;

	if (priv->phy)
		phy_disconnect(priv->phy);

	hip04_free_ring(ndev, d);
	unregister_netdev(ndev);
	free_irq(ndev->irq, ndev);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);

	return 0;
}
static const struct of_device_id hip04_mac_match[] = {
	{ .compatible = "hisilicon,hip04-mac" },
	{ }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);
static struct platform_driver hip04_mac_driver = {
	.probe	= hip04_mac_probe,
	.remove	= hip04_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= hip04_mac_match,
	},
};

module_platform_driver(hip04_mac_driver);
MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");