// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt)			"bcmasp_intf: " fmt

#include <asm/byteorder.h>
#include <linux/brcmphy.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/platform_device.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmasp.h"
#include "bcmasp_intf_defs.h"

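/* Descriptor ring bookkeeping helpers. Each ring is tracked both as a
 * CPU-side array index and as a DMA byte address handed to the
 * hardware: incr_ring() advances the index with wrap-around, while
 * incr_last_byte()/incr_first_byte() advance the DMA pointer by one
 * DESC_SIZE stride so that it points at the last or first byte of the
 * next descriptor, respectively.
 */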
static int incr_ring(int index, int ring_count)
{
	index++;
	if (index == ring_count)
		return 0;

	return index;
}

/* Points to last byte of descriptor */
static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
				 int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr > end)
		return beg + DESC_SIZE - 1;

	return addr;
}

/* Points to first byte of descriptor */
static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
				  int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr >= end)
		return beg;

	return addr;
}

static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
{
	if (en) {
		tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
				TX_EPKT_C_CFG_MISC_PT |
				(intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
				TX_EPKT_C_CFG_MISC);
	} else {
		tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
	}
}

static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
{
	if (en)
		rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
				RX_EDPKT_CFG_ENABLE);
	else
		rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
}

static void bcmasp_set_rx_mode(struct net_device *dev)
{
	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	spin_lock_bh(&intf->parent->mda_lock);

	bcmasp_disable_all_filters(intf);

	if (dev->flags & IFF_PROMISC)
		goto set_promisc;

	bcmasp_set_promisc(intf, 0);

	bcmasp_set_broad(intf, 1);

	bcmasp_set_oaddr(intf, dev->dev_addr, 1);

	if (dev->flags & IFF_ALLMULTI) {
		bcmasp_set_allmulti(intf, 1);
	} else {
		bcmasp_set_allmulti(intf, 0);

		netdev_for_each_mc_addr(ha, dev) {
			ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
			if (ret) {
				intf->mib.mc_filters_full_cnt++;
				goto set_promisc;
			}
		}
	}

	netdev_for_each_uc_addr(ha, dev) {
		ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
		if (ret) {
			intf->mib.uc_filters_full_cnt++;
			goto set_promisc;
		}
	}

	spin_unlock_bh(&intf->parent->mda_lock);
	return;

set_promisc:
	bcmasp_set_promisc(intf, 1);
	intf->mib.promisc_filters_cnt++;

	/* disable all filters used by this port */
	bcmasp_disable_all_filters(intf);

	spin_unlock_bh(&intf->parent->mda_lock);
}

static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
{
	struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];

	txcb->skb = NULL;
	dma_unmap_addr_set(txcb, dma_addr, 0);
	dma_unmap_len_set(txcb, dma_len, 0);
	txcb->last = false;
}

static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
{
	int next_index, i;

	/* Check if we have enough room for cnt descriptors */
	for (i = 0; i < cnt; i++) {
		next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
		if (next_index == intf->tx_spb_clean_index)
			return 1;
	}

	return 0;
}

static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
					   struct sk_buff *skb,
					   bool *csum_hw)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 header = 0, header2 = 0, epkt = 0;
	struct bcmasp_pkt_offload *offload;
	unsigned int header_cnt = 0;
	u8 ip_proto;
	int ret;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return skb;

	ret = skb_cow_head(skb, sizeof(*offload));
	if (ret < 0) {
		intf->mib.tx_realloc_offload_failed++;
		goto help;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
		ip_proto = ip_hdr(skb)->protocol;
		header_cnt += 2;
		break;
	case htons(ETH_P_IPV6):
		header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
		ip_proto = ipv6_hdr(skb)->nexthdr;
		header_cnt += 2;
		break;
	default:
		goto help;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
		epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
		header_cnt++;
		break;
	case IPPROTO_UDP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
		epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
		header_cnt++;
		break;
	default:
		goto help;
	}

	offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));

	header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
		  PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
	epkt |= PKT_OFFLOAD_EPKT_OP;

	offload->nop = htonl(PKT_OFFLOAD_NOP);
	offload->header = htonl(header);
	offload->header2 = htonl(header2);
	offload->epkt = htonl(epkt);
	offload->end = htonl(PKT_OFFLOAD_END_OP);
	*csum_hw = true;

	return skb;

help:
	skb_checksum_help(skb);

	return skb;
}

static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
{
	return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
}

static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
}

static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
}

static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
{
	return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
}

static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
}

static const struct bcmasp_intf_ops bcmasp_intf_ops = {
	.rx_desc_read = bcmasp_rx_edpkt_dma_rq,
	.rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
	.rx_desc_write = bcmasp_rx_edpkt_dma_wq,
	.tx_read = bcmasp_tx_spb_dma_rq,
	.tx_write = bcmasp_tx_spb_dma_wq,
};

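/* TX path: map the linear head and each fragment, fill one descriptor
 * per mapping, then publish the new "valid" pointer to the DMA engine
 * with a single write after a write barrier. The queue is stopped
 * early whenever a worst-case (MAX_SKB_FRAGS + 1) burst would no
 * longer fit in the ring.
 */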
static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	unsigned int total_bytes, size;
	int spb_index, nr_frags, i, j;
	struct bcmasp_tx_cb *txcb;
	dma_addr_t mapping, valid;
	struct bcmasp_desc *desc;
	bool csum_hw = false;
	struct device *kdev;
	skb_frag_t *frag;

	kdev = &intf->parent->pdev->dev;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (tx_spb_ring_full(intf, nr_frags + 1)) {
		netif_stop_queue(dev);
		if (net_ratelimit())
			netdev_err(dev, "Tx Ring Full!\n");
		return NETDEV_TX_BUSY;
	}

	/* Save skb len before adding csum offload header */
	total_bytes = skb->len;
	skb = bcmasp_csum_offload(dev, skb, &csum_hw);
	if (!skb)
		return NETDEV_TX_OK;

	spb_index = intf->tx_spb_index;
	valid = intf->tx_spb_dma_valid;
	for (i = 0; i <= nr_frags; i++) {
		if (!i) {
			size = skb_headlen(skb);
			if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
				if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
					return NETDEV_TX_OK;
				size = skb->len;
			}
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		if (dma_mapping_error(kdev, mapping)) {
			intf->mib.tx_dma_failed++;
			spb_index = intf->tx_spb_index;
			for (j = 0; j < i; j++) {
				bcmasp_clean_txcb(intf, spb_index);
				spb_index = incr_ring(spb_index,
						      DESC_RING_COUNT);
			}
			/* Rewind so we do not have a hole */
			spb_index = intf->tx_spb_index;
			return NETDEV_TX_OK;
		}

		txcb = &intf->tx_cbs[spb_index];
		desc = &intf->tx_spb_cpu[spb_index];
		memset(desc, 0, sizeof(*desc));
		txcb->skb = skb;
		txcb->bytes_sent = total_bytes;
		dma_unmap_addr_set(txcb, dma_addr, mapping);
		dma_unmap_len_set(txcb, dma_len, size);
		if (!i) {
			desc->flags |= DESC_SOF;
			if (csum_hw)
				desc->flags |= DESC_EPKT_CMD;
		}

		if (i == nr_frags) {
			desc->flags |= DESC_EOF;
			txcb->last = true;
		}

		desc->buf = mapping;
		desc->size = size;
		desc->flags |= DESC_INT_EN;

		netif_dbg(intf, tx_queued, dev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  spb_index);

		spb_index = incr_ring(spb_index, DESC_RING_COUNT);
		valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
				       DESC_RING_COUNT);
	}

	/* Ensure all descriptors have been written to DRAM for the
	 * hardware to see up-to-date contents.
	 */
	wmb();

	intf->tx_spb_index = spb_index;
	intf->tx_spb_dma_valid = valid;

	skb_tx_timestamp(skb);

	bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);

	if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static void bcmasp_netif_start(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	bcmasp_set_rx_mode(dev);
	napi_enable(&intf->tx_napi);
	napi_enable(&intf->rx_napi);

	bcmasp_enable_rx_irq(intf, 1);
	bcmasp_enable_tx_irq(intf, 1);
	bcmasp_enable_phy_irq(intf, 1);

	phy_start(dev->phydev);
}

static void umac_reset(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x0, UMC_CMD);
	umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
	usleep_range(10, 100);
	/* We hold the umac in reset and bring it out of
	 * reset when phy link is up.
	 */
}

static void umac_set_hw_addr(struct bcmasp_intf *intf,
			     const unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	umac_wl(intf, mac0, UMC_MAC0);
	umac_wl(intf, mac1, UMC_MAC1);
}

static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
			    unsigned int enable)
{
	u32 reg;

	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		return;
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_wl(intf, reg, UMC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static void umac_init(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x800, UMC_FRM_LEN);
	umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
	umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}

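/* Reclaim completed TX descriptors. The hardware read pointer returned
 * by bcmasp_intf_tx_read() indicates how far the DMA engine has
 * consumed the ring; every control block between tx_spb_clean_index
 * and that point can be unmapped, accounted and freed.
 */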
static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long read, released = 0;
	struct bcmasp_tx_cb *txcb;
	struct bcmasp_desc *desc;
	dma_addr_t mapping;

	read = bcmasp_intf_tx_read(intf);
	while (intf->tx_spb_dma_read != read) {
		txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
		mapping = dma_unmap_addr(txcb, dma_addr);

		dma_unmap_single(kdev, mapping,
				 dma_unmap_len(txcb, dma_len),
				 DMA_TO_DEVICE);

		if (txcb->last) {
			dev_consume_skb_any(txcb->skb);

			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_packets);
			u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
			u64_stats_update_end(&stats->syncp);
		}

		desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];

		netif_dbg(intf, tx_done, intf->ndev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  intf->tx_spb_clean_index);

		bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
		released++;

		intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
						     DESC_RING_COUNT);
		intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
							intf->tx_spb_dma_addr,
							DESC_RING_COUNT);
	}

	return released;
}

static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, tx_napi);
	int released = 0;

	released = bcmasp_tx_reclaim(intf);

	napi_complete(&intf->tx_napi);

	bcmasp_enable_tx_irq(intf, 1);

	if (released)
		netif_wake_queue(intf->ndev);

	return 0;
}

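/* RX path: the hardware writes frames into one large contiguous ring
 * buffer and produces a descriptor per frame. The poll loop copies
 * each frame into a freshly allocated skb (copy-based receive, no page
 * recycling) and then returns the buffer space to the hardware by
 * advancing the ring-buffer read pointer.
 */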
static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, rx_napi);
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long processed = 0;
	struct bcmasp_desc *desc;
	struct sk_buff *skb;
	dma_addr_t valid;
	void *data;
	u64 flags;
	u32 len;

	valid = bcmasp_intf_rx_desc_read(intf) + 1;
	if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
		valid = intf->rx_edpkt_dma_addr;

	while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
		desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];

		/* Ensure that descriptor has been fully written to DRAM by
		 * hardware before reading by the CPU
		 */
		rmb();

		/* Calculate virt addr by offsetting from physical addr */
		data = intf->rx_ring_cpu +
			(DESC_ADDR(desc->buf) - intf->rx_ring_dma);

		flags = DESC_FLAGS(desc->buf);
		if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
			if (net_ratelimit()) {
				netif_err(intf, rx_status, intf->ndev,
					  "flags=0x%llx\n", flags);
			}

			u64_stats_update_begin(&stats->syncp);
			if (flags & DESC_CRC_ERR)
				u64_stats_inc(&stats->rx_crc_errs);
			if (flags & DESC_RX_SYM_ERR)
				u64_stats_inc(&stats->rx_sym_errs);
			u64_stats_update_end(&stats->syncp);

			goto next;
		}

		dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
					DMA_FROM_DEVICE);

		len = desc->size;

		skb = napi_alloc_skb(napi, len);
		if (!skb) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->rx_dropped);
			u64_stats_update_end(&stats->syncp);
			intf->mib.alloc_rx_skb_failed++;

			goto next;
		}

		skb_put(skb, len);
		memcpy(skb->data, data, len);

		skb_pull(skb, 2);
		len -= 2;
		if (likely(intf->crc_fwd)) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		if ((intf->ndev->features & NETIF_F_RXCSUM) &&
		    (desc->buf & DESC_CHKSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb->protocol = eth_type_trans(skb, intf->ndev);

		napi_gro_receive(napi, skb);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_add(&stats->rx_bytes, len);
		u64_stats_update_end(&stats->syncp);

next:
		bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
					    desc->size));

		processed++;
		intf->rx_edpkt_dma_read =
			incr_first_byte(intf->rx_edpkt_dma_read,
					intf->rx_edpkt_dma_addr,
					DESC_RING_COUNT);
		intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
						 DESC_RING_COUNT);
	}

	bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);

	if (processed < budget) {
		napi_complete_done(&intf->rx_napi, processed);
		bcmasp_enable_rx_irq(intf, 1);
	}

	return processed;
}

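/* PHY link change callback: mirror the negotiated speed, duplex and
 * pause parameters into the UniMAC command register, take the MAC out
 * of software reset on link-up, and reflect the link state in the
 * RGMII out-of-band status bits.
 */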
static void bcmasp_adj_link(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 cmd_bits = 0, reg;
	int changed = 0;
	bool active;

	if (intf->old_link != phydev->link) {
		changed = 1;
		intf->old_link = phydev->link;
	}

	if (intf->old_duplex != phydev->duplex) {
		changed = 1;
		intf->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = UMC_CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = UMC_CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = UMC_CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = UMC_CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= UMC_CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= UMC_CMD_HD_EN;

	if (intf->old_pause != phydev->pause) {
		changed = 1;
		intf->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_rl(intf, UMC_CMD);
		reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
			UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
			UMC_CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		if (reg & UMC_CMD_SW_RESET) {
			reg &= ~UMC_CMD_SW_RESET;
			umac_wl(intf, reg, UMC_CMD);
			udelay(2);
			reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
		}
		umac_wl(intf, reg, UMC_CMD);

		active = phy_init_eee(phydev, 0) >= 0;
		bcmasp_eee_enable_set(intf, active);
	}

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	if (phydev->link)
		reg |= RGMII_LINK;
	else
		reg &= ~RGMII_LINK;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);

	if (changed)
		phy_print_status(phydev);
}

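/* Allocate per-interface DMA resources: a multi-page RX packet buffer
 * (streaming mapped), coherent RX and TX descriptor rings, and the
 * array of TX control blocks used for unmapping on completion.
 */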
static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct page *buffer_pg;

	/* Alloc RX ring */
	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
	if (!buffer_pg)
		return -ENOMEM;

	intf->rx_ring_cpu = page_to_virt(buffer_pg);
	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, intf->rx_ring_dma))
		goto free_rx_buffer;

	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
						&intf->rx_edpkt_dma_addr, GFP_KERNEL);
	if (!intf->rx_edpkt_cpu)
		goto free_rx_buffer_dma;

	/* Alloc TX ring */
	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
					      &intf->tx_spb_dma_addr, GFP_KERNEL);
	if (!intf->tx_spb_cpu)
		goto free_rx_edpkt_dma;

	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
			       GFP_KERNEL);
	if (!intf->tx_cbs)
		goto free_tx_spb_dma;

	return 0;

free_tx_spb_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
free_rx_edpkt_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
free_rx_buffer_dma:
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
free_rx_buffer:
	__free_pages(buffer_pg, intf->rx_buf_order);

	return -ENOMEM;
}

static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;

	/* RX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);

	/* TX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
	kfree(intf->tx_cbs);
}

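/* Program the RX ring windows. BASE/END bound the ring, READ/WRITE
 * track the consumer and producer positions, and VALID publishes how
 * far the hardware may advance; everything starts out describing an
 * empty ring before the channel is enabled.
 */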
static void bcmasp_init_rx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
	intf->rx_edpkt_index = 0;

	/* Make sure channels are disabled */
	rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);

	/* Rx SPB */
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_END);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_VALID);

	/* EDPKT */
	rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
			RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
		       (RX_EDPKT_CFG_CFG0_64_ALN <<
			RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
		       (RX_EDPKT_CFG_CFG0_EFRM_STUF),
			RX_EDPKT_CFG_CFG0);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);

	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
		   UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
		   UMAC2FB_CFG);
}

static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
	intf->tx_spb_index = 0;
	intf->tx_spb_clean_index = 0;
	memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);

	/* Make sure channels are disabled */
	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);

	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
		       TX_SPB_CTRL_XF_CTRL2);
	tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
	tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);

	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
}

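/* Power the internal EPHY up or down: toggle the IDDQ bias/power-down
 * bits and the 25MHz clock gate around a reset pulse, with short
 * delays between the steps.
 */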
static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
{
	u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
		   RGMII_EPHY_CFG_IDDQ_GLOBAL;
	u32 reg;

	reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
	if (enable) {
		reg &= ~RGMII_EPHY_CK25_DIS;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~mask;
		reg |= RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~RGMII_EPHY_RESET;
	} else {
		reg |= mask | RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);
		reg |= RGMII_EPHY_CK25_DIS;
	}
	rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
	mdelay(1);

	/* Set or clear the LED control override to avoid lighting up LEDs
	 * while the EPHY is powered off and drawing unnecessary current.
	 */
	reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
	if (enable)
		reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
	else
		reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
	rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
}

static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
{
	u32 reg;

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_OOB_DIS;
	if (enable)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static void bcmasp_netif_deinit(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 reg, timeout = 1000;

	napi_disable(&intf->tx_napi);

	bcmasp_enable_tx(intf, 0);

	/* Flush any TX packets in the pipe */
	tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
	do {
		reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
		if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
			break;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);
	tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);

	bcmasp_tx_reclaim(intf);

	umac_enable_set(intf, UMC_CMD_TX_EN, 0);

	phy_stop(dev->phydev);

	umac_enable_set(intf, UMC_CMD_RX_EN, 0);

	bcmasp_flush_rx_port(intf);
	usleep_range(1000, 2000);
	bcmasp_enable_rx(intf, 0);

	napi_disable(&intf->rx_napi);

	/* Disable interrupts */
	bcmasp_enable_tx_irq(intf, 0);
	bcmasp_enable_rx_irq(intf, 0);
	bcmasp_enable_phy_irq(intf, 0);

	netif_napi_del(&intf->tx_napi);
	netif_napi_del(&intf->rx_napi);
}

static int bcmasp_stop(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, ifdown, dev, "bcmasp stop\n");

	/* Stop tx from updating HW */
	netif_tx_disable(dev);

	bcmasp_netif_deinit(dev);

	bcmasp_reclaim_free_buffers(intf);

	phy_disconnect(dev->phydev);

	/* Disable internal EPHY or external PHY */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);

	/* Disable the interface clocks */
	bcmasp_core_clock_set_intf(intf, false);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_configure_port(struct bcmasp_intf *intf)
{
	u32 reg, id_mode_dis = 0;

	reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
	reg &= ~RGMII_PORT_MODE_MASK;

	switch (intf->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* RGMII_NO_ID: TXC transitions at the same time as TXD
		 *		(requires PCB or receiver-side delay)
		 * RGMII:	Add 2ns delay on TXC (90 degree shift)
		 *
		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
		 */
		id_mode_dis = RGMII_ID_MODE_DIS;
		fallthrough;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		reg |= RGMII_PORT_MODE_EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		reg |= RGMII_PORT_MODE_EXT_EPHY;
		break;
	default:
		break;
	}

	if (intf->internal_phy)
		reg |= RGMII_PORT_MODE_EPHY;

	rgmii_wl(intf, reg, RGMII_PORT_CNTRL);

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_ID_MODE_DIS;
	reg |= id_mode_dis;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	phy_interface_t phy_iface = intf->phy_interface;
	u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
			PHY_BRCM_DIS_TXCRXC_NOENRGY |
			PHY_BRCM_IDDQ_SUSPEND;
	struct phy_device *phydev = NULL;
	int ret;

	/* Always enable interface clocks */
	bcmasp_core_clock_set_intf(intf, true);

	/* Enable internal PHY or external PHY before any MAC activity */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, true);
	else
		bcmasp_rgmii_mode_en_set(intf, true);
	bcmasp_configure_port(intf);

	/* This is an ugly quirk but we have not been correctly
	 * interpreting the phy_interface values and we have done that
	 * across different drivers, so at least we are consistent in
	 * our mistakes.
	 *
	 * When the Generic PHY driver is in use either the PHY has
	 * been strapped or programmed correctly by the boot loader so
	 * we should stick to our incorrect interpretation since we
	 * have validated it.
	 *
	 * Now when a dedicated PHY driver is in use, we need to
	 * reverse the meaning of the phy_interface_mode values to
	 * something that the PHY driver will interpret and act on such
	 * that we have two mistakes canceling themselves so to speak.
	 * We only do this for the two modes that GENET driver
	 * officially supports on Broadcom STB chips:
	 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
	 * Other modes are not *officially* supported with the boot
	 * loader and the scripted environment generating Device Tree
	 * blobs for those platforms.
	 *
	 * Note that internal PHY and fixed-link configurations are not
	 * affected because they use different phy_interface_t values
	 * or the Generic PHY driver.
	 */
	switch (phy_iface) {
	case PHY_INTERFACE_MODE_RGMII:
		phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	default:
		break;
	}

	if (phy_connect) {
		phydev = of_phy_connect(dev, intf->phy_dn,
					bcmasp_adj_link, phy_flags,
					phy_iface);
		if (!phydev) {
			ret = -ENODEV;
			netdev_err(dev, "could not attach to PHY\n");
			goto err_phy_disable;
		}

		if (intf->internal_phy)
			dev->phydev->irq = PHY_MAC_INTERRUPT;

		/* Indicate that the MAC is responsible for PHY PM */
		phydev->mac_managed_pm = true;
	}

	umac_reset(intf);

	umac_init(intf);

	umac_set_hw_addr(intf, dev->dev_addr);

	intf->old_duplex = -1;
	intf->old_link = -1;
	intf->old_pause = -1;

	bcmasp_init_tx(intf);
	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
	bcmasp_enable_tx(intf, 1);

	bcmasp_init_rx(intf);
	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
	bcmasp_enable_rx(intf, 1);

	intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);

	bcmasp_netif_start(dev);

	netif_start_queue(dev);

	return 0;

err_phy_disable:
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);
	return ret;
}

static int bcmasp_open(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	int ret;

	netif_dbg(intf, ifup, dev, "bcmasp open\n");

	ret = bcmasp_alloc_buffers(intf);
	if (ret)
		return ret;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		goto err_free_mem;

	ret = bcmasp_netif_init(dev, true);
	if (ret) {
		clk_disable_unprepare(intf->parent->clk);
		goto err_free_mem;
	}

	return ret;

err_free_mem:
	bcmasp_reclaim_free_buffers(intf);

	return ret;
}

static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
	intf->mib.tx_timeout_cnt++;
}

static int bcmasp_get_phys_port_name(struct net_device *dev,
				     char *name, size_t len)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	if (snprintf(name, len, "p%d", intf->port) >= len)
		return -EINVAL;

	return 0;
}

static void bcmasp_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct bcmasp_intf_stats64 *lstats;
	unsigned int start;

	lstats = &intf->stats64;

	do {
		start = u64_stats_fetch_begin(&lstats->syncp);
		stats->rx_packets = u64_stats_read(&lstats->rx_packets);
		stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
		stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
		stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
		stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
		stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;

		stats->tx_packets = u64_stats_read(&lstats->tx_packets);
		stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
	} while (u64_stats_fetch_retry(&lstats->syncp, start));
}

static const struct net_device_ops bcmasp_netdev_ops = {
	.ndo_open = bcmasp_open,
	.ndo_stop = bcmasp_stop,
	.ndo_start_xmit = bcmasp_xmit,
	.ndo_tx_timeout = bcmasp_tx_timeout,
	.ndo_set_rx_mode = bcmasp_set_rx_mode,
	.ndo_get_phys_port_name = bcmasp_get_phys_port_name,
	.ndo_eth_ioctl = phy_do_ioctl_running,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_get_stats64 = bcmasp_get_stats64,
};

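/* Resolve the per-port (UniMAC, UMAC2FB, RGMII) and per-channel
 * (TX SPB/EPKT, RX EDPKT) register blocks as offsets from the shared
 * ASP register base.
 */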
static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
	intf->res.umac = priv->base + UMC_OFFSET(intf);
	intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
					  (intf->port * 0x4));
	intf->res.rgmii = priv->base + RGMII_OFFSET(intf);

	intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
	intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
	intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
	intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
	intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);

	intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
	intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}

#define MAX_IRQ_STR_LEN		64
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
					    struct device_node *ndev_dn, int i)
{
	struct device *dev = &priv->pdev->dev;
	struct bcmasp_intf *intf;
	struct net_device *ndev;
	int ch, port, ret;

	if (of_property_read_u32(ndev_dn, "reg", &port)) {
		dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
		goto err;
	}

	if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
		dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
		goto err;
	}

	ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
	if (!ndev) {
		dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
		goto err;
	}
	intf = netdev_priv(ndev);

	intf->parent = priv;
	intf->ndev = ndev;
	intf->channel = ch;
	intf->port = port;
	intf->ndev_dn = ndev_dn;
	intf->index = i;

	ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
	if (ret < 0) {
		dev_err(dev, "invalid PHY mode property\n");
		goto err_free_netdev;
	}

	if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
		intf->internal_phy = true;

	intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
	if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
		ret = of_phy_register_fixed_link(ndev_dn);
		if (ret) {
			dev_warn(dev, "%s: failed to register fixed PHY\n",
				 ndev_dn->name);
			goto err_free_netdev;
		}
		intf->phy_dn = ndev_dn;
	}

	/* Map resource */
	bcmasp_map_res(priv, intf);

	if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
	     intf->phy_interface != PHY_INTERFACE_MODE_MII &&
	     intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
	    (intf->port != 1 && intf->internal_phy)) {
		netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
			   phy_modes(intf->phy_interface), intf->port);
		ret = -EINVAL;
		goto err_free_netdev;
	}

	ret = of_get_ethdev_address(ndev_dn, ndev);
	if (ret) {
		netdev_warn(ndev, "using random Ethernet MAC\n");
		eth_hw_addr_random(ndev);
	}

	SET_NETDEV_DEV(ndev, dev);
	intf->ops = &bcmasp_intf_ops;
	ndev->netdev_ops = &bcmasp_netdev_ops;
	ndev->ethtool_ops = &bcmasp_ethtool_ops;
	intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			  NETIF_F_RXCSUM;
	ndev->hw_features |= ndev->features;
	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);

	return intf;

err_free_netdev:
	free_netdev(ndev);
err:
	return NULL;
}

void bcmasp_interface_destroy(struct bcmasp_intf *intf)
{
	if (intf->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(intf->ndev);
	if (of_phy_is_fixed_link(intf->ndev_dn))
		of_phy_deregister_fixed_link(intf->ndev_dn);
	free_netdev(intf->ndev);
}

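/* Arm Wake-on-LAN before suspending: enable magic-packet detection
 * (optionally with the SecureOn password), hand network filters to the
 * wakeup engine if requested, and leave the UniMAC receiver running in
 * promiscuous mode so wake events can be detected.
 */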
static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
{
	struct net_device *ndev = intf->ndev;
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= UMC_MPD_CTRL_MPD_EN;
	reg &= ~UMC_MPD_CTRL_PSW_EN;
	if (intf->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
			UMC_PSW_MS);
		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
			UMC_PSW_LS);
		reg |= UMC_MPD_CTRL_PSW_EN;
	}
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->wolopts & WAKE_FILTER)
		bcmasp_netfilt_suspend(intf);

	/* Bring UniMAC out of reset if needed and enable RX */
	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		reg &= ~UMC_CMD_SW_RESET;

	reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
	umac_wl(intf, reg, UMC_CMD);

	umac_enable_set(intf, UMC_CMD_RX_EN, 1);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_CLEAR);
	}

	if (intf->eee.eee_enabled && intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, true);

	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}

int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct net_device *dev = intf->ndev;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmasp_netif_deinit(dev);

	if (!intf->wolopts) {
		if (intf->internal_phy)
			bcmasp_ephy_enable_set(intf, false);
		else
			bcmasp_rgmii_mode_en_set(intf, false);

		/* If Wake-on-LAN is disabled, we can safely
		 * disable the network interface clocks.
		 */
		bcmasp_core_clock_set_intf(intf, false);
	}

	if (device_may_wakeup(kdev) && intf->wolopts)
		bcmasp_suspend_to_wol(intf);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
	u32 reg;

	if (intf->eee.eee_enabled && intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, false);

	reg = umac_rl(intf, UMC_MPD_CTRL);
	reg &= ~UMC_MPD_CTRL_MPD_EN;
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_SET);
	}
}

int bcmasp_interface_resume(struct bcmasp_intf *intf)
{
	struct net_device *dev = intf->ndev;
	int ret;

	if (!netif_running(dev))
		return 0;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		return ret;

	ret = bcmasp_netif_init(dev, false);
	if (ret)
		goto out;

	bcmasp_resume_from_wol(intf);

	if (intf->eee.eee_enabled)
		bcmasp_eee_enable_set(intf, true);

	netif_device_attach(dev);

	return 0;

out:
	clk_disable_unprepare(intf->parent->clk);
	return ret;
}