/*
 * Hisilicon Fast Ethernet MAC Driver
 *
 * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
/* MAC control register list */
#define MAC_PORTSEL                     0x0200
#define MAC_PORTSEL_STAT_CPU            BIT(0)
#define MAC_PORTSEL_RMII                BIT(1)
#define MAC_PORTSET                     0x0208
#define MAC_PORTSET_DUPLEX_FULL         BIT(0)
#define MAC_PORTSET_LINKED              BIT(1)
#define MAC_PORTSET_SPEED_100M          BIT(2)
#define MAC_SET                         0x0210
#define MAX_FRAME_SIZE                  1600
#define MAX_FRAME_SIZE_MASK             GENMASK(10, 0)
#define BIT_PAUSE_EN                    BIT(18)
#define RX_COALESCE_SET                 0x0340
#define RX_COALESCED_FRAME_OFFSET       24
#define RX_COALESCED_FRAMES             8
#define RX_COALESCED_TIMER              0x74
#define QLEN_SET                        0x0344
#define RX_DEPTH_OFFSET                 8
#define MAX_HW_FIFO_DEPTH               64
#define HW_TX_FIFO_DEPTH                12
#define HW_RX_FIFO_DEPTH                (MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH)
#define IQFRM_DES                       0x0354
#define RX_FRAME_LEN_MASK               GENMASK(11, 0)
#define IQ_ADDR                         0x0358
#define EQ_ADDR                         0x0360
#define EQFRM_LEN                       0x0364
#define ADDRQ_STAT                      0x036C
#define TX_CNT_INUSE_MASK               GENMASK(5, 0)
#define BIT_TX_READY                    BIT(24)
#define BIT_RX_READY                    BIT(25)
/* global control register list */
#define GLB_HOSTMAC_L32                 0x0000
#define GLB_HOSTMAC_H16                 0x0004
#define GLB_SOFT_RESET                  0x0008
#define SOFT_RESET_ALL                  BIT(0)
#define GLB_FWCTRL                      0x0010
#define FWCTRL_VLAN_ENABLE              BIT(0)
#define FWCTRL_FW2CPU_ENA               BIT(5)
#define FWCTRL_FWALL2CPU                BIT(7)
#define GLB_MACTCTRL                    0x0014
#define MACTCTRL_UNI2CPU                BIT(1)
#define MACTCTRL_MULTI2CPU              BIT(3)
#define MACTCTRL_BROAD2CPU              BIT(5)
#define MACTCTRL_MACT_ENA               BIT(7)
#define GLB_IRQ_STAT                    0x0030
#define GLB_IRQ_ENA                     0x0034
#define IRQ_ENA_PORT0_MASK              GENMASK(7, 0)
#define IRQ_ENA_PORT0                   BIT(18)
#define IRQ_ENA_ALL                     BIT(19)
#define GLB_IRQ_RAW                     0x0038
#define IRQ_INT_RX_RDY                  BIT(0)
#define IRQ_INT_TX_PER_PACKET           BIT(1)
#define IRQ_INT_TX_FIFO_EMPTY           BIT(6)
#define IRQ_INT_MULTI_RXRDY             BIT(7)
#define DEF_INT_MASK                    (IRQ_INT_MULTI_RXRDY | \
                                         IRQ_INT_TX_PER_PACKET | \
                                         IRQ_INT_TX_FIFO_EMPTY)
#define GLB_MAC_L32_BASE                0x0100
#define GLB_MAC_H16_BASE                0x0104
#define MACFLT_HI16_MASK                GENMASK(15, 0)
#define BIT_MACFLT_ENA                  BIT(17)
#define BIT_MACFLT_FW2CPU               BIT(21)
#define GLB_MAC_H16(reg)                (GLB_MAC_H16_BASE + ((reg) * 0x8))
#define GLB_MAC_L32(reg)                (GLB_MAC_L32_BASE + ((reg) * 0x8))
#define MAX_MAC_FILTER_NUM              8
#define MAX_UNICAST_ADDRESSES           2
#define MAX_MULTICAST_ADDRESSES         (MAX_MAC_FILTER_NUM - \
                                         MAX_UNICAST_ADDRESSES)
/* software tx and rx queue number, should be power of 2 */
#define TXQ_NUM                         64      /* assumed; any power of 2 */
#define RXQ_NUM                         128     /* assumed; any power of 2 */
#define FEMAC_POLL_WEIGHT               16

#define PHY_RESET_DELAYS_PROPERTY       "hisilicon,phy-reset-delays-us"
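/*
 * The "hisilicon,phy-reset-delays-us" DT property supplies the three PHY
 * reset timings consumed by hisi_femac_phy_reset() below: the delay before
 * asserting reset, the reset pulse width, and the delay after deassert that
 * must elapse before the first MDIO access.  All values are in microseconds.
 */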
enum phy_reset_delays {
        PRE_DELAY,
        PULSE,
        POST_DELAY,
        DELAYS_NUM,
};

struct hisi_femac_queue {
        struct sk_buff **skb;
        dma_addr_t *dma_phys;
        int num;
        unsigned int head;
        unsigned int tail;
};

struct hisi_femac_priv {
        void __iomem *port_base;
        void __iomem *glb_base;
        struct clk *clk;
        struct reset_control *mac_rst;
        struct reset_control *phy_rst;
        u32 phy_reset_delays[DELAYS_NUM];
        u32 link_status;

        struct device *dev;
        struct net_device *ndev;

        struct hisi_femac_queue txq;
        struct hisi_femac_queue rxq;
        u32 tx_fifo_used_cnt;
        struct napi_struct napi;
};
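/*
 * The FEMAC has no descriptor rings in memory: buffers are handed to the
 * hardware by writing their DMA address (and, on TX, the frame length) into
 * the IQ_ADDR/EQ_ADDR FIFO registers.  The two hisi_femac_queue rings above
 * only mirror that hardware FIFO in software, so the driver can find the skb
 * and its DMA mapping again when the hardware reports completion.
 */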
static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs)
{
        u32 val;

        val = readl(priv->glb_base + GLB_IRQ_ENA);
        writel(val | irqs, priv->glb_base + GLB_IRQ_ENA);
}

static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs)
{
        u32 val;

        val = readl(priv->glb_base + GLB_IRQ_ENA);
        writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA);
}
static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv,
                                    struct sk_buff *skb, unsigned int pos)
{
        dma_addr_t dma_addr;

        dma_addr = priv->txq.dma_phys[pos];
        dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
}
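/*
 * TX completion: the hardware exposes the number of frames still queued in
 * ADDRQ_STAT (TX_CNT_INUSE_MASK).  Everything the driver has submitted beyond
 * that count has left the FIFO, so the corresponding skbs can be unmapped and
 * freed from the software ring tail.
 */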
static void hisi_femac_xmit_reclaim(struct net_device *dev)
{
        struct sk_buff *skb;
        struct hisi_femac_priv *priv = netdev_priv(dev);
        struct hisi_femac_queue *txq = &priv->txq;
        unsigned int bytes_compl = 0, pkts_compl = 0;
        u32 val;

        netif_tx_lock(dev);

        val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
        while (val < priv->tx_fifo_used_cnt) {
                skb = txq->skb[txq->tail];
                if (unlikely(!skb)) {
                        netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
                                   val, priv->tx_fifo_used_cnt);
                        break;
                }
                hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
                pkts_compl++;
                bytes_compl += skb->len;
                dev_kfree_skb_any(skb);

                priv->tx_fifo_used_cnt--;

                val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
                txq->skb[txq->tail] = NULL;
                txq->tail = (txq->tail + 1) % txq->num;
        }

        netdev_completed_queue(dev, pkts_compl, bytes_compl);

        if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
                netif_wake_queue(dev);

        netif_tx_unlock(dev);
}
static void hisi_femac_adjust_link(struct net_device *dev)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);
        struct phy_device *phy = dev->phydev;
        u32 status = 0;

        if (phy->link)
                status |= MAC_PORTSET_LINKED;
        if (phy->duplex == DUPLEX_FULL)
                status |= MAC_PORTSET_DUPLEX_FULL;
        if (phy->speed == SPEED_100)
                status |= MAC_PORTSET_SPEED_100M;

        if ((status != priv->link_status) &&
            ((status | priv->link_status) & MAC_PORTSET_LINKED)) {
                writel(status, priv->port_base + MAC_PORTSET);
                priv->link_status = status;
                phy_print_status(phy);
        }
}
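/*
 * RX refill: while the hardware signals room in its receive FIFO
 * (BIT_RX_READY), allocate a MAX_FRAME_SIZE buffer, map it and push its DMA
 * address into IQ_ADDR.  The software ring head tracks the order in which
 * buffers were handed to the hardware.
 */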
static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
{
        struct hisi_femac_queue *rxq = &priv->rxq;
        struct sk_buff *skb;
        u32 pos;
        u32 len = MAX_FRAME_SIZE;
        dma_addr_t addr;

        pos = rxq->head;
        while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
                if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
                        break;
                if (unlikely(rxq->skb[pos])) {
                        netdev_err(priv->ndev, "err skb[%d]=%p\n",
                                   pos, rxq->skb[pos]);
                        break;
                }
                skb = netdev_alloc_skb_ip_align(priv->ndev, len);
                if (unlikely(!skb))
                        break;

                addr = dma_map_single(priv->dev, skb->data, len,
                                      DMA_FROM_DEVICE);
                if (dma_mapping_error(priv->dev, addr)) {
                        dev_kfree_skb_any(skb);
                        break;
                }
                rxq->dma_phys[pos] = addr;
                rxq->skb[pos] = skb;
                writel(addr, priv->port_base + IQ_ADDR);
                pos = (pos + 1) % rxq->num;
        }
        rxq->head = pos;
}
static int hisi_femac_rx(struct net_device *dev, int limit)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);
        struct hisi_femac_queue *rxq = &priv->rxq;
        struct sk_buff *skb;
        dma_addr_t addr;
        u32 rx_pkt_info, pos, len, rx_pkts_num = 0;

        pos = rxq->tail;
        while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
                rx_pkt_info = readl(priv->port_base + IQFRM_DES);
                len = rx_pkt_info & RX_FRAME_LEN_MASK;
                len -= ETH_FCS_LEN;

                /* tell hardware we will deal with this packet */
                writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);

                rx_pkts_num++;

                skb = rxq->skb[pos];
                if (unlikely(!skb)) {
                        netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
                        break;
                }
                rxq->skb[pos] = NULL;

                addr = rxq->dma_phys[pos];
                dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE,
                                 DMA_FROM_DEVICE);
                skb_put(skb, len);
                if (unlikely(skb->len > MAX_FRAME_SIZE)) {
                        netdev_err(dev, "rcv len err, len = %d\n", skb->len);
                        dev->stats.rx_errors++;
                        dev->stats.rx_length_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                }

                skb->protocol = eth_type_trans(skb, dev);
                napi_gro_receive(&priv->napi, skb);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += skb->len;
next:
                pos = (pos + 1) % rxq->num;
                if (rx_pkts_num >= limit)
                        break;
        }
        rxq->tail = pos;

        hisi_femac_rx_refill(priv);

        return rx_pkts_num;
}
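/*
 * NAPI poll: reclaim finished TX frames, then process received frames up to
 * the remaining budget.  The raw interrupt status is re-read and acknowledged
 * until no more work is pending or the budget is exhausted; interrupts are
 * re-enabled only when the poll completes under budget.
 */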
static int hisi_femac_poll(struct napi_struct *napi, int budget)
{
        struct hisi_femac_priv *priv = container_of(napi,
                                        struct hisi_femac_priv, napi);
        struct net_device *dev = priv->ndev;
        int work_done = 0, task = budget;
        u32 ints;
        int num;

        do {
                hisi_femac_xmit_reclaim(dev);
                num = hisi_femac_rx(dev, task);
                work_done += num;
                task -= num;
                if (work_done >= budget)
                        break;

                ints = readl(priv->glb_base + GLB_IRQ_RAW);
                writel(ints & DEF_INT_MASK,
                       priv->glb_base + GLB_IRQ_RAW);
        } while (ints & DEF_INT_MASK);

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                hisi_femac_irq_enable(priv, DEF_INT_MASK &
                                        (~IRQ_INT_TX_PER_PACKET));
        }

        return work_done;
}
static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct hisi_femac_priv *priv = netdev_priv(dev);
        u32 ints;

        ints = readl(priv->glb_base + GLB_IRQ_RAW);

        if (likely(ints & DEF_INT_MASK)) {
                writel(ints & DEF_INT_MASK,
                       priv->glb_base + GLB_IRQ_RAW);
                hisi_femac_irq_disable(priv, DEF_INT_MASK);
                napi_schedule(&priv->napi);
        }

        return IRQ_HANDLED;
}
static int hisi_femac_init_queue(struct device *dev,
                                 struct hisi_femac_queue *queue,
                                 unsigned int num)
{
        queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
                                  GFP_KERNEL);
        if (!queue->skb)
                return -ENOMEM;

        queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t),
                                       GFP_KERNEL);
        if (!queue->dma_phys)
                return -ENOMEM;

        queue->num = num;
        queue->head = 0;
        queue->tail = 0;

        return 0;
}
static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv)
{
        int ret;

        ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);
        if (ret)
                return ret;

        ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
        if (ret)
                return ret;

        priv->tx_fifo_used_cnt = 0;

        return 0;
}
static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
{
        struct hisi_femac_queue *txq = &priv->txq;
        struct hisi_femac_queue *rxq = &priv->rxq;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        u32 pos;

        pos = rxq->tail;
        while (pos != rxq->head) {
                skb = rxq->skb[pos];
                if (unlikely(!skb)) {
                        netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n",
                                   pos, rxq->head);
                        break;
                }

                dma_addr = rxq->dma_phys[pos];
                dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE,
                                 DMA_FROM_DEVICE);

                dev_kfree_skb_any(skb);
                rxq->skb[pos] = NULL;
                pos = (pos + 1) % rxq->num;
        }
        rxq->tail = pos;

        pos = txq->tail;
        while (pos != txq->head) {
                skb = txq->skb[pos];
                if (unlikely(!skb)) {
                        netdev_err(priv->ndev, "NULL tx skb. pos=%d, head=%d\n",
                                   pos, txq->head);
                        break;
                }
                hisi_femac_tx_dma_unmap(priv, skb, pos);
                dev_kfree_skb_any(skb);
                txq->skb[pos] = NULL;
                pos = (pos + 1) % txq->num;
        }
        txq->tail = pos;
        priv->tx_fifo_used_cnt = 0;
}
static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
                                      unsigned char *mac)
{
        u32 reg;

        reg = mac[1] | (mac[0] << 8);
        writel(reg, priv->glb_base + GLB_HOSTMAC_H16);

        reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
        writel(reg, priv->glb_base + GLB_HOSTMAC_L32);

        return 0;
}
static int hisi_femac_port_reset(struct hisi_femac_priv *priv)
{
        u32 val;

        val = readl(priv->glb_base + GLB_SOFT_RESET);
        val |= SOFT_RESET_ALL;
        writel(val, priv->glb_base + GLB_SOFT_RESET);

        usleep_range(500, 800);

        val &= ~SOFT_RESET_ALL;
        writel(val, priv->glb_base + GLB_SOFT_RESET);

        return 0;
}
static int hisi_femac_net_open(struct net_device *dev)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);

        hisi_femac_port_reset(priv);
        hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
        hisi_femac_rx_refill(priv);

        netif_carrier_off(dev);
        netdev_reset_queue(dev);
        netif_start_queue(dev);
        napi_enable(&priv->napi);

        priv->link_status = 0;
        if (dev->phydev)
                phy_start(dev->phydev);

        writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
        hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);

        return 0;
}
static int hisi_femac_net_close(struct net_device *dev)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);

        hisi_femac_irq_disable(priv, IRQ_ENA_PORT0);

        if (dev->phydev)
                phy_stop(dev->phydev);

        netif_stop_queue(dev);
        napi_disable(&priv->napi);

        hisi_femac_free_skb_rings(priv);

        return 0;
}
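/*
 * Transmit path: a frame is accepted only if the hardware reports space in
 * its TX FIFO (BIT_TX_READY) and the software ring has room.  The skb is then
 * DMA-mapped and queued to the hardware by writing the buffer address to
 * EQ_ADDR and the frame length (including FCS) to EQFRM_LEN.
 */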
static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);
        struct hisi_femac_queue *txq = &priv->txq;
        dma_addr_t addr;
        u32 val;

        val = readl(priv->port_base + ADDRQ_STAT);
        val &= BIT_TX_READY;
        if (!val) {
                hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
                dev->stats.tx_dropped++;
                dev->stats.tx_fifo_errors++;
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

        if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
                                 txq->num))) {
                hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
                dev->stats.tx_dropped++;
                dev->stats.tx_fifo_errors++;
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

        addr = dma_map_single(priv->dev, skb->data,
                              skb->len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(priv->dev, addr))) {
                dev_kfree_skb_any(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
        txq->dma_phys[txq->head] = addr;

        txq->skb[txq->head] = skb;
        txq->head = (txq->head + 1) % txq->num;

        writel(addr, priv->port_base + EQ_ADDR);
        writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);

        priv->tx_fifo_used_cnt++;

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
        netdev_sent_queue(dev, skb->len);

        return NETDEV_TX_OK;
}
static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);
        struct sockaddr *skaddr = p;

        if (!is_valid_ether_addr(skaddr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
        dev->addr_assign_type &= ~NET_ADDR_RANDOM;

        hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);

        return 0;
}
static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv,
                                             unsigned int reg_n, bool enable)
{
        u32 val;

        val = readl(priv->glb_base + GLB_MAC_H16(reg_n));
        if (enable)
                val |= BIT_MACFLT_ENA;
        else
                val &= ~BIT_MACFLT_ENA;
        writel(val, priv->glb_base + GLB_MAC_H16(reg_n));
}
static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv,
                                          unsigned char *addr,
                                          unsigned int reg_n)
{
        unsigned int high, low;
        u32 val;

        high = GLB_MAC_H16(reg_n);
        low = GLB_MAC_L32(reg_n);

        val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
        writel(val, priv->glb_base + low);

        val = readl(priv->glb_base + high);
        val &= ~MACFLT_HI16_MASK;
        val |= ((addr[0] << 8) | addr[1]);
        val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU);
        writel(val, priv->glb_base + high);
}
static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv,
                                        bool promisc_mode)
{
        u32 val;

        val = readl(priv->glb_base + GLB_FWCTRL);
        if (promisc_mode)
                val |= FWCTRL_FWALL2CPU;
        else
                val &= ~FWCTRL_FWALL2CPU;
        writel(val, priv->glb_base + GLB_FWCTRL);
}
/* Handle multiple multicast addresses (perfect filtering) */
static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv)
{
        struct net_device *dev = priv->ndev;
        u32 val;

        val = readl(priv->glb_base + GLB_MACTCTRL);
        if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) ||
            (dev->flags & IFF_ALLMULTI)) {
                val |= MACTCTRL_MULTI2CPU;
        } else {
                int reg = MAX_UNICAST_ADDRESSES;
                int i;
                struct netdev_hw_addr *ha;

                for (i = reg; i < MAX_MAC_FILTER_NUM; i++)
                        hisi_femac_enable_hw_addr_filter(priv, i, false);

                netdev_for_each_mc_addr(ha, dev) {
                        hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
                        reg++;
                }
                val &= ~MACTCTRL_MULTI2CPU;
        }
        writel(val, priv->glb_base + GLB_MACTCTRL);
}
/* Handle multiple unicast addresses (perfect filtering) */
static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv)
{
        struct net_device *dev = priv->ndev;
        u32 val;

        val = readl(priv->glb_base + GLB_MACTCTRL);
        if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) {
                val |= MACTCTRL_UNI2CPU;
        } else {
                int reg = 0;
                int i;
                struct netdev_hw_addr *ha;

                for (i = reg; i < MAX_UNICAST_ADDRESSES; i++)
                        hisi_femac_enable_hw_addr_filter(priv, i, false);

                netdev_for_each_uc_addr(ha, dev) {
                        hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
                        reg++;
                }
                val &= ~MACTCTRL_UNI2CPU;
        }
        writel(val, priv->glb_base + GLB_MACTCTRL);
}
static void hisi_femac_net_set_rx_mode(struct net_device *dev)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);

        if (dev->flags & IFF_PROMISC) {
                hisi_femac_set_promisc_mode(priv, true);
        } else {
                hisi_femac_set_promisc_mode(priv, false);
                hisi_femac_set_mc_addr_filter(priv);
                hisi_femac_set_uc_addr_filter(priv);
        }
}
static int hisi_femac_net_ioctl(struct net_device *dev,
                                struct ifreq *ifreq, int cmd)
{
        if (!netif_running(dev))
                return -EINVAL;

        if (!dev->phydev)
                return -EINVAL;

        return phy_mii_ioctl(dev->phydev, ifreq, cmd);
}
static const struct ethtool_ops hisi_femac_ethtools_ops = {
        .get_link               = ethtool_op_get_link,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops hisi_femac_netdev_ops = {
        .ndo_open               = hisi_femac_net_open,
        .ndo_stop               = hisi_femac_net_close,
        .ndo_start_xmit         = hisi_femac_net_xmit,
        .ndo_do_ioctl           = hisi_femac_net_ioctl,
        .ndo_set_mac_address    = hisi_femac_set_mac_address,
        .ndo_set_rx_mode        = hisi_femac_net_set_rx_mode,
};
static void hisi_femac_core_reset(struct hisi_femac_priv *priv)
{
        reset_control_assert(priv->mac_rst);
        reset_control_deassert(priv->mac_rst);
}
static void hisi_femac_sleep_us(u32 time_us)
{
        u32 time_ms;

        if (!time_us)
                return;

        time_ms = DIV_ROUND_UP(time_us, 1000);
        if (time_ms < 20)
                usleep_range(time_us, time_us + 500);
        else
                msleep(time_ms);
}
static void hisi_femac_phy_reset(struct hisi_femac_priv *priv)
{
        /* To make sure PHY hardware reset success,
         * we must keep PHY in deassert state first and
         * then complete the hardware reset operation
         */
        reset_control_deassert(priv->phy_rst);
        hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]);

        reset_control_assert(priv->phy_rst);
        /* delay some time to ensure reset ok,
         * this depends on PHY hardware feature
         */
        hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]);
        reset_control_deassert(priv->phy_rst);
        /* delay some time to ensure later MDIO access */
        hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]);
}
static void hisi_femac_port_init(struct hisi_femac_priv *priv)
{
        u32 val;

        /* MAC gets link status info and phy mode by software config */
        val = MAC_PORTSEL_STAT_CPU;
        if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII)
                val |= MAC_PORTSEL_RMII;
        writel(val, priv->port_base + MAC_PORTSEL);

        /* clear all interrupt status */
        writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
        hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0);

        val = readl(priv->glb_base + GLB_FWCTRL);
        val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU);
        val |= FWCTRL_FW2CPU_ENA;
        writel(val, priv->glb_base + GLB_FWCTRL);

        val = readl(priv->glb_base + GLB_MACTCTRL);
        val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA);
        writel(val, priv->glb_base + GLB_MACTCTRL);

        val = readl(priv->port_base + MAC_SET);
        val &= ~MAX_FRAME_SIZE_MASK;
        val |= MAX_FRAME_SIZE;
        writel(val, priv->port_base + MAC_SET);

        val = RX_COALESCED_TIMER |
                (RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET);
        writel(val, priv->port_base + RX_COALESCE_SET);

        val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH;
        writel(val, priv->port_base + QLEN_SET);
}
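/*
 * Probe expects two MMIO regions: index 0 is the per-port MAC register block
 * (port_base) and index 1 is the global/forwarding block (glb_base), plus one
 * clock, "mac" and "phy" reset lines and a single interrupt shared between TX
 * and RX events.
 */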
static int hisi_femac_drv_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct resource *res;
        struct net_device *ndev;
        struct hisi_femac_priv *priv;
        struct phy_device *phy;
        const char *mac_addr;
        int ret;

        ndev = alloc_etherdev(sizeof(*priv));
        if (!ndev)
                return -ENOMEM;

        platform_set_drvdata(pdev, ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);

        priv = netdev_priv(ndev);
        priv->dev = dev;
        priv->ndev = ndev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->port_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->port_base)) {
                ret = PTR_ERR(priv->port_base);
                goto out_free_netdev;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        priv->glb_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->glb_base)) {
                ret = PTR_ERR(priv->glb_base);
                goto out_free_netdev;
        }

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk)) {
                dev_err(dev, "failed to get clk\n");
                ret = -ENODEV;
                goto out_free_netdev;
        }

        ret = clk_prepare_enable(priv->clk);
        if (ret) {
                dev_err(dev, "failed to enable clk %d\n", ret);
                goto out_free_netdev;
        }

        priv->mac_rst = devm_reset_control_get(dev, "mac");
        if (IS_ERR(priv->mac_rst)) {
                ret = PTR_ERR(priv->mac_rst);
                goto out_disable_clk;
        }
        hisi_femac_core_reset(priv);

        priv->phy_rst = devm_reset_control_get(dev, "phy");
        if (IS_ERR(priv->phy_rst)) {
                priv->phy_rst = NULL;
        } else {
                ret = of_property_read_u32_array(node,
                                                 PHY_RESET_DELAYS_PROPERTY,
                                                 priv->phy_reset_delays,
                                                 DELAYS_NUM);
                if (ret)
                        goto out_disable_clk;
                hisi_femac_phy_reset(priv);
        }

        phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link);
        if (!phy) {
                dev_err(dev, "connect to PHY failed!\n");
                ret = -ENODEV;
                goto out_disable_clk;
        }

        phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
                           (unsigned long)phy->phy_id,
                           phy_modes(phy->interface));

        mac_addr = of_get_mac_address(node);
        if (mac_addr)
                ether_addr_copy(ndev->dev_addr, mac_addr);
        if (!is_valid_ether_addr(ndev->dev_addr)) {
                eth_hw_addr_random(ndev);
                dev_warn(dev, "using random MAC address %pM\n",
                         ndev->dev_addr);
        }

        ndev->watchdog_timeo = 6 * HZ;
        ndev->priv_flags |= IFF_UNICAST_FLT;
        ndev->netdev_ops = &hisi_femac_netdev_ops;
        ndev->ethtool_ops = &hisi_femac_ethtools_ops;
        netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);

        hisi_femac_port_init(priv);

        ret = hisi_femac_init_tx_and_rx_queues(priv);
        if (ret)
                goto out_disconnect_phy;

        ndev->irq = platform_get_irq(pdev, 0);
        if (ndev->irq <= 0) {
                dev_err(dev, "No irq resource\n");
                ret = -ENODEV;
                goto out_disconnect_phy;
        }

        ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt,
                               IRQF_SHARED, pdev->name, ndev);
        if (ret) {
                dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq);
                goto out_disconnect_phy;
        }

        ret = register_netdev(ndev);
        if (ret) {
                dev_err(dev, "register_netdev failed!\n");
                goto out_disconnect_phy;
        }

        return ret;

out_disconnect_phy:
        netif_napi_del(&priv->napi);
        phy_disconnect(phy);
out_disable_clk:
        clk_disable_unprepare(priv->clk);
out_free_netdev:
        free_netdev(ndev);

        return ret;
}
static int hisi_femac_drv_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct hisi_femac_priv *priv = netdev_priv(ndev);

        netif_napi_del(&priv->napi);
        unregister_netdev(ndev);

        phy_disconnect(ndev->phydev);
        clk_disable_unprepare(priv->clk);
        free_netdev(ndev);

        return 0;
}
#ifdef CONFIG_PM
static int hisi_femac_drv_suspend(struct platform_device *pdev,
                                  pm_message_t state)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct hisi_femac_priv *priv = netdev_priv(ndev);

        disable_irq(ndev->irq);
        if (netif_running(ndev)) {
                hisi_femac_net_close(ndev);
                netif_device_detach(ndev);
        }

        clk_disable_unprepare(priv->clk);

        return 0;
}
static int hisi_femac_drv_resume(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct hisi_femac_priv *priv = netdev_priv(ndev);

        clk_prepare_enable(priv->clk);
        if (priv->phy_rst)
                hisi_femac_phy_reset(priv);

        if (netif_running(ndev)) {
                hisi_femac_port_init(priv);
                hisi_femac_net_open(ndev);
                netif_device_attach(ndev);
        }
        enable_irq(ndev->irq);

        return 0;
}
#endif
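/*
 * Illustrative device tree node for this driver, pieced together from the
 * resources the probe path requests.  The binding document is authoritative;
 * the addresses, interrupt number, clock/reset phandles and delay values
 * below are placeholders only:
 *
 *      femac: ethernet@10090000 {
 *              compatible = "hisilicon,hi3516cv300-femac";
 *              reg = <0x10090000 0x1000>, <0x10091300 0x200>;
 *              interrupts = <12>;
 *              clocks = <&crg FEMAC_CLK>;
 *              resets = <&crg 0xec 0>, <&crg 0xec 3>;
 *              reset-names = "mac", "phy";
 *              mac-address = [00 00 00 00 00 00];
 *              phy-mode = "mii";
 *              phy-handle = <&femac_phy>;
 *              hisilicon,phy-reset-delays-us = <10000 10000 30000>;
 *      };
 */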
static const struct of_device_id hisi_femac_match[] = {
        {.compatible = "hisilicon,hisi-femac-v1",},
        {.compatible = "hisilicon,hisi-femac-v2",},
        {.compatible = "hisilicon,hi3516cv300-femac",},
        {},
};

MODULE_DEVICE_TABLE(of, hisi_femac_match);
static struct platform_driver hisi_femac_driver = {
        .driver = {
                .name = "hisi-femac",
                .of_match_table = hisi_femac_match,
        },
        .probe = hisi_femac_drv_probe,
        .remove = hisi_femac_drv_remove,
#ifdef CONFIG_PM
        .suspend = hisi_femac_drv_suspend,
        .resume = hisi_femac_drv_resume,
#endif
};

module_platform_driver(hisi_femac_driver);
MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver");
MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hisi-femac");