// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "bcm4908_enet.h"
#include "unimac.h"

#define ENET_DMA_CH_RX_CFG			ENET_DMA_CH0_CFG
#define ENET_DMA_CH_TX_CFG			ENET_DMA_CH1_CFG
#define ENET_DMA_CH_RX_STATE_RAM		ENET_DMA_CH0_STATE_RAM
#define ENET_DMA_CH_TX_STATE_RAM		ENET_DMA_CH1_STATE_RAM

#define ENET_TX_BDS_NUM				200
#define ENET_RX_BDS_NUM				200
#define ENET_RX_BDS_NUM_MAX			8192

#define ENET_DMA_INT_DEFAULTS			(ENET_DMA_CH_CFG_INT_DONE | \
						 ENET_DMA_CH_CFG_INT_NO_DESC | \
						 ENET_DMA_CH_CFG_INT_BUFF_DONE)
#define ENET_DMA_MAX_BURST_LEN			8 /* in 64 bit words */

#define ENET_MTU_MAX				ETH_DATA_LEN /* Is it possible to support 2044? */
#define BRCM_MAX_TAG_LEN			6
#define ENET_MAX_ETH_OVERHEAD			(ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
						 ETH_FCS_LEN + 4) /* 32 */

#define ENET_RX_SKB_BUF_SIZE			(NET_SKB_PAD + NET_IP_ALIGN + \
						 ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
						 ENET_MTU_MAX + ETH_FCS_LEN + 4)
#define ENET_RX_SKB_BUF_ALLOC_SIZE		(SKB_DATA_ALIGN(ENET_RX_SKB_BUF_SIZE) + \
						 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define ENET_RX_BUF_DMA_OFFSET			(NET_SKB_PAD + NET_IP_ALIGN)
#define ENET_RX_BUF_DMA_SIZE			(ENET_RX_SKB_BUF_SIZE - ENET_RX_BUF_DMA_OFFSET)
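
/*
 * Worked example, assuming the standard values of the kernel constants
 * (ETH_HLEN = 14, VLAN_HLEN = 4, ETH_FCS_LEN = 4): per-frame overhead is
 * 14 (Ethernet header) + 6 (Broadcom tag) + 4 (VLAN tag) + 4 (FCS) + 4 = 32
 * bytes, which is where the "32" in the comment above comes from. An RX
 * buffer additionally reserves NET_SKB_PAD + NET_IP_ALIGN bytes of headroom
 * that is never mapped for the hardware; only the ENET_RX_BUF_DMA_SIZE tail
 * of each fragment is handed to the DMA engine.
 */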

struct bcm4908_enet_dma_ring_bd {
	__le32 ctl;
	__le32 addr;
} __packed;

struct bcm4908_enet_dma_ring_slot {
	union {
		void *buf;			/* RX */
		struct sk_buff *skb;		/* TX */
	};
	unsigned int len;
	dma_addr_t dma_addr;
};

struct bcm4908_enet_dma_ring {
	int is_tx;
	int read_idx;
	int write_idx;
	int length;
	u16 cfg_block;
	u16 st_ram_block;
	struct napi_struct napi;

	union {
		void *cpu_addr;
		struct bcm4908_enet_dma_ring_bd *buf_desc;
	};
	dma_addr_t dma_addr;

	struct bcm4908_enet_dma_ring_slot *slots;
};

struct bcm4908_enet {
	struct device *dev;
	struct net_device *netdev;
	void __iomem *base;
	int irq_tx;

	struct bcm4908_enet_dma_ring tx_ring;
	struct bcm4908_enet_dma_ring rx_ring;
};
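
/*
 * Ring bookkeeping used throughout this driver: the CPU hands a descriptor
 * to the hardware by setting DMA_CTL_STATUS_OWN in its ctl word and reclaims
 * it once the hardware clears that bit. write_idx is the slot where the CPU
 * queues the next buffer, read_idx is the oldest slot still owned by the
 * hardware, and the descriptor flagged DMA_CTL_STATUS_WRAP is the last one
 * before the engine wraps back to the ring base.
 */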

/***
 * R/W ops
 */

static u32 enet_read(struct bcm4908_enet *enet, u16 offset)
{
	return readl(enet->base + offset);
}

static void enet_write(struct bcm4908_enet *enet, u16 offset, u32 value)
{
	writel(value, enet->base + offset);
}

static void enet_maskset(struct bcm4908_enet *enet, u16 offset, u32 mask, u32 set)
{
	u32 val;

	WARN_ON(set & ~mask);

	val = enet_read(enet, offset);
	val = (val & ~mask) | (set & mask);
	enet_write(enet, offset, val);
}

static void enet_set(struct bcm4908_enet *enet, u16 offset, u32 set)
{
	enet_maskset(enet, offset, set, set);
}

static u32 enet_umac_read(struct bcm4908_enet *enet, u16 offset)
{
	return enet_read(enet, ENET_UNIMAC + offset);
}

static void enet_umac_write(struct bcm4908_enet *enet, u16 offset, u32 value)
{
	enet_write(enet, ENET_UNIMAC + offset, value);
}

static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set)
{
	enet_set(enet, ENET_UNIMAC + offset, set);
}

/***
 * Helpers
 */

static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
{
	enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
}
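
/*
 * With the default MTU of ETH_DATA_LEN (1500) this programs a UniMAC
 * frame-length limit of 1500 + 32 = 1532 bytes: room for the Ethernet
 * header, a Broadcom tag, a VLAN tag and the FCS on top of the payload.
 */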

/***
 * DMA ring ops
 */

static void bcm4908_enet_dma_ring_intrs_on(struct bcm4908_enet *enet,
					   struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
}

static void bcm4908_enet_dma_ring_intrs_off(struct bcm4908_enet *enet,
					    struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
}

static void bcm4908_enet_dma_ring_intrs_ack(struct bcm4908_enet *enet,
					    struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
}

/***
 * DMA
 */

static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
				       struct bcm4908_enet_dma_ring *ring)
{
	int size = ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
	struct device *dev = enet->dev;

	ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
	if (!ring->cpu_addr)
		return -ENOMEM;

	if (((uintptr_t)ring->cpu_addr) & (0x40 - 1)) {
		dev_err(dev, "Invalid DMA ring alignment\n");
		goto err_free_buf_descs;
	}

	ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL);
	if (!ring->slots)
		goto err_free_buf_descs;

	return 0;

err_free_buf_descs:
	dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
	ring->cpu_addr = NULL;

	return -ENOMEM;
}
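
/*
 * The alignment check above guards the 64-byte (0x40) descriptor ring
 * alignment the DMA engine expects. dma_alloc_coherent() is documented to
 * return at least page-aligned memory, so the check should never fire and
 * is purely defensive.
 */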

static void bcm4908_enet_dma_free(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int size;

	size = rx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
	if (rx_ring->cpu_addr)
		dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr);
	kfree(rx_ring->slots);

	size = tx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
	if (tx_ring->cpu_addr)
		dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr);
	kfree(tx_ring->slots);
}

static int bcm4908_enet_dma_alloc(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int err;

	tx_ring->length = ENET_TX_BDS_NUM;
	tx_ring->is_tx = 1;
	tx_ring->cfg_block = ENET_DMA_CH_TX_CFG;
	tx_ring->st_ram_block = ENET_DMA_CH_TX_STATE_RAM;
	err = bcm4908_dma_alloc_buf_descs(enet, tx_ring);
	if (err) {
		dev_err(dev, "Failed to alloc TX buf descriptors: %d\n", err);
		return err;
	}

	rx_ring->length = ENET_RX_BDS_NUM;
	rx_ring->is_tx = 0;
	rx_ring->cfg_block = ENET_DMA_CH_RX_CFG;
	rx_ring->st_ram_block = ENET_DMA_CH_RX_STATE_RAM;
	err = bcm4908_dma_alloc_buf_descs(enet, rx_ring);
	if (err) {
		dev_err(dev, "Failed to alloc RX buf descriptors: %d\n", err);
		bcm4908_enet_dma_free(enet);
		return err;
	}

	return 0;
}

static void bcm4908_enet_dma_reset(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
	int i;

	/* Disable the DMA controller and channel */
	for (i = 0; i < ARRAY_SIZE(rings); i++)
		enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);

	/* Reset channels state */
	for (i = 0; i < ARRAY_SIZE(rings); i++) {
		struct bcm4908_enet_dma_ring *ring = rings[i];

		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
	}
}

static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int idx)
{
	struct bcm4908_enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
	struct bcm4908_enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
	struct device *dev = enet->dev;
	u32 tmp;
	int err;

	slot->buf = napi_alloc_frag(ENET_RX_SKB_BUF_ALLOC_SIZE);
	if (!slot->buf)
		return -ENOMEM;

	slot->dma_addr = dma_map_single(dev, slot->buf + ENET_RX_BUF_DMA_OFFSET,
					ENET_RX_BUF_DMA_SIZE, DMA_FROM_DEVICE);
	err = dma_mapping_error(dev, slot->dma_addr);
	if (err) {
		dev_err(dev, "Failed to map DMA buffer: %d\n", err);
		skb_free_frag(slot->buf);
		slot->buf = NULL;
		return err;
	}

	tmp = ENET_RX_BUF_DMA_SIZE << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
	tmp |= DMA_CTL_STATUS_OWN;
	if (idx == enet->rx_ring.length - 1)
		tmp |= DMA_CTL_STATUS_WRAP;
	buf_desc->ctl = cpu_to_le32(tmp);
	buf_desc->addr = cpu_to_le32(slot->dma_addr);

	return 0;
}
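
/*
 * Layout of the ctl word written above: the usable buffer length lives in
 * the DMA_CTL_LEN_DESC_BUFLENGTH field, DMA_CTL_STATUS_OWN hands the
 * descriptor to the hardware, and DMA_CTL_STATUS_WRAP on the final index
 * makes the engine loop back to the first descriptor.
 */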

static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
				       struct bcm4908_enet_dma_ring *ring)
{
	int reset_channel = 0; /* We support only 1 main channel (with TX and RX) */
	int reset_subch = ring->is_tx ? 1 : 0;

	/* Reset the DMA channel */
	enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, BIT(reset_channel * 2 + reset_subch));
	enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, 0);

	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_MAX_BURST, ENET_DMA_MAX_BURST_LEN);
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);

	enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
		   (uint32_t)ring->dma_addr);

	ring->read_idx = 0;
	ring->write_idx = 0;
}

static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct bcm4908_enet_dma_ring_slot *slot;
	struct device *dev = enet->dev;
	int i;

	for (i = rx_ring->length - 1; i >= 0; i--) {
		slot = &rx_ring->slots[i];

		if (!slot->buf)
			continue;

		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
		skb_free_frag(slot->buf);
		slot->buf = NULL;
	}
}

static int bcm4908_enet_dma_init(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int err;
	int i;

	for (i = 0; i < rx_ring->length; i++) {
		err = bcm4908_enet_dma_alloc_rx_buf(enet, i);
		if (err) {
			dev_err(dev, "Failed to alloc RX buffer: %d\n", err);
			bcm4908_enet_dma_uninit(enet);
			return err;
		}
	}

	bcm4908_enet_dma_ring_init(enet, &enet->tx_ring);
	bcm4908_enet_dma_ring_init(enet, &enet->rx_ring);

	return 0;
}

static void bcm4908_enet_dma_tx_ring_enable(struct bcm4908_enet *enet,
					    struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
}

static void bcm4908_enet_dma_tx_ring_disable(struct bcm4908_enet *enet,
					     struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
}

static void bcm4908_enet_dma_rx_ring_enable(struct bcm4908_enet *enet,
					    struct bcm4908_enet_dma_ring *ring)
{
	enet_set(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
}

static void bcm4908_enet_dma_rx_ring_disable(struct bcm4908_enet *enet,
					     struct bcm4908_enet_dma_ring *ring)
{
	unsigned long deadline;
	u32 tmp;

	enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);

	deadline = jiffies + usecs_to_jiffies(2000);
	do {
		tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
		if (!(tmp & ENET_DMA_CH_CFG_ENABLE))
			return;
		enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
		usleep_range(10, 30);
	} while (!time_after_eq(jiffies, deadline));

	dev_warn(enet->dev, "Timeout waiting for DMA RX stop\n");
}

/***
 * Ethernet driver
 */

static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet)
{
	u32 cmd;

	bcm4908_enet_set_mtu(enet, enet->netdev->mtu);

	cmd = enet_umac_read(enet, UMAC_CMD);
	enet_umac_write(enet, UMAC_CMD, cmd | CMD_SW_RESET);
	enet_umac_write(enet, UMAC_CMD, cmd & ~CMD_SW_RESET);

	enet_set(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH);
	enet_maskset(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH, 0);

	enet_set(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB);
	enet_maskset(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB, 0);

	cmd = enet_umac_read(enet, UMAC_CMD);
	cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);
	cmd &= ~CMD_TX_EN;
	cmd &= ~CMD_RX_EN;
	cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
	enet_umac_write(enet, UMAC_CMD, cmd);

	enet_maskset(enet, ENET_GMAC_STATUS,
		     ENET_GMAC_STATUS_ETH_SPEED_MASK |
		     ENET_GMAC_STATUS_HD |
		     ENET_GMAC_STATUS_AUTO_CFG_EN |
		     ENET_GMAC_STATUS_LINK_UP,
		     ENET_GMAC_STATUS_ETH_SPEED_1000 |
		     ENET_GMAC_STATUS_AUTO_CFG_EN |
		     ENET_GMAC_STATUS_LINK_UP);
}
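
/*
 * Everything above is static MAC setup: speed is forced to 1 Gbit/s and the
 * link is reported as up, with no phylib involvement. On BCM4908 the ENET
 * block feeds the SoC's integrated switch, so there is no external PHY whose
 * negotiation results would need tracking here.
 */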

static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
{
	struct bcm4908_enet *enet = dev_id;
	struct bcm4908_enet_dma_ring *ring;

	ring = (irq == enet->irq_tx) ? &enet->tx_ring : &enet->rx_ring;

	bcm4908_enet_dma_ring_intrs_off(enet, ring);
	bcm4908_enet_dma_ring_intrs_ack(enet, ring);

	napi_schedule(&ring->napi);

	return IRQ_HANDLED;
}

static int bcm4908_enet_open(struct net_device *netdev)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);
	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int err;

	err = request_irq(netdev->irq, bcm4908_enet_irq_handler, 0, "enet", enet);
	if (err) {
		dev_err(dev, "Failed to request IRQ %d: %d\n", netdev->irq, err);
		return err;
	}

	if (enet->irq_tx > 0) {
		err = request_irq(enet->irq_tx, bcm4908_enet_irq_handler, 0,
				  "tx", enet);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d: %d\n",
				enet->irq_tx, err);
			free_irq(netdev->irq, enet);
			return err;
		}
	}

	bcm4908_enet_gmac_init(enet);
	bcm4908_enet_dma_reset(enet);
	bcm4908_enet_dma_init(enet);

	enet_umac_set(enet, UMAC_CMD, CMD_TX_EN | CMD_RX_EN);

	enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);

	if (enet->irq_tx > 0) {
		napi_enable(&tx_ring->napi);
		bcm4908_enet_dma_ring_intrs_ack(enet, tx_ring);
		bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
	}

	bcm4908_enet_dma_rx_ring_enable(enet, rx_ring);
	napi_enable(&rx_ring->napi);
	netif_carrier_on(netdev);
	netif_start_queue(netdev);
	bcm4908_enet_dma_ring_intrs_ack(enet, rx_ring);
	bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);

	return 0;
}

static int bcm4908_enet_stop(struct net_device *netdev)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);
	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	napi_disable(&rx_ring->napi);
	napi_disable(&tx_ring->napi);
	netdev_reset_queue(netdev);

	bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
	bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);

	bcm4908_enet_dma_uninit(enet);

	free_irq(enet->irq_tx, enet);
	free_irq(enet->netdev->irq, enet);

	return 0;
}

static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);
	struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring_slot *slot;
	struct device *dev = enet->dev;
	struct bcm4908_enet_dma_ring_bd *buf_desc;
	int free_buf_descs;
	u32 tmp;

	/* Free transmitted skbs */
	if (enet->irq_tx < 0 &&
	    !(le32_to_cpu(ring->buf_desc[ring->read_idx].ctl) & DMA_CTL_STATUS_OWN))
		napi_schedule(&enet->tx_ring.napi);

	/* Don't use the last empty buf descriptor */
	if (ring->read_idx <= ring->write_idx)
		free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
	else
		free_buf_descs = ring->read_idx - ring->write_idx;
	if (free_buf_descs < 2) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	/* Hardware removes OWN bit after sending data */
	buf_desc = &ring->buf_desc[ring->write_idx];
	if (unlikely(le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->write_idx];
	slot->skb = skb;
	slot->len = skb->len;
	slot->dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, slot->dma_addr)))
		return NETDEV_TX_BUSY;

	tmp = skb->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
	tmp |= DMA_CTL_STATUS_OWN;
	tmp |= DMA_CTL_STATUS_SOP;
	tmp |= DMA_CTL_STATUS_EOP;
	tmp |= DMA_CTL_STATUS_APPEND_CRC;
	if (ring->write_idx + 1 == ring->length - 1)
		tmp |= DMA_CTL_STATUS_WRAP;

	netdev_sent_queue(enet->netdev, skb->len);

	buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
	buf_desc->ctl = cpu_to_le32(tmp);

	bcm4908_enet_dma_tx_ring_enable(enet, &enet->tx_ring);

	if (++ring->write_idx == ring->length - 1)
		ring->write_idx = 0;

	return NETDEV_TX_OK;
}
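
/*
 * A worked example of the free-descriptor accounting above: with
 * ring->length = 200, read_idx = 10 and write_idx = 10 (an empty ring, since
 * read_idx <= write_idx), free_buf_descs = 10 - 10 + 200 = 200. With
 * read_idx = 10 and write_idx = 9 (a full ring), free_buf_descs = 10 - 9 = 1
 * and the queue is stopped, which keeps one descriptor unused so that a full
 * ring can be told apart from an empty one.
 */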

static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
{
	struct bcm4908_enet_dma_ring *rx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
	struct bcm4908_enet *enet = container_of(rx_ring, struct bcm4908_enet, rx_ring);
	struct device *dev = enet->dev;
	int handled = 0;

	while (handled < weight) {
		struct bcm4908_enet_dma_ring_bd *buf_desc;
		struct bcm4908_enet_dma_ring_slot slot;
		struct sk_buff *skb;
		u32 ctl;
		u32 len;
		int err;

		buf_desc = &enet->rx_ring.buf_desc[enet->rx_ring.read_idx];
		ctl = le32_to_cpu(buf_desc->ctl);
		if (ctl & DMA_CTL_STATUS_OWN)
			break;

		slot = enet->rx_ring.slots[enet->rx_ring.read_idx];

		/* Provide new buffer before unpinning the old one */
		err = bcm4908_enet_dma_alloc_rx_buf(enet, enet->rx_ring.read_idx);
		if (err)
			break;

		if (++enet->rx_ring.read_idx == enet->rx_ring.length)
			enet->rx_ring.read_idx = 0;

		len = (ctl & DMA_CTL_LEN_DESC_BUFLENGTH) >> DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;

		if (len < ETH_ZLEN ||
		    (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
			skb_free_frag(slot.buf);
			enet->netdev->stats.rx_dropped++;
			break;
		}

		dma_unmap_single(dev, slot.dma_addr, ENET_RX_BUF_DMA_SIZE, DMA_FROM_DEVICE);

		skb = build_skb(slot.buf, ENET_RX_SKB_BUF_ALLOC_SIZE);
		if (unlikely(!skb)) {
			skb_free_frag(slot.buf);
			enet->netdev->stats.rx_dropped++;
			break;
		}
		skb_reserve(skb, ENET_RX_BUF_DMA_OFFSET);
		skb_put(skb, len - ETH_FCS_LEN);
		skb->protocol = eth_type_trans(skb, enet->netdev);

		netif_receive_skb(skb);

		enet->netdev->stats.rx_packets++;
		enet->netdev->stats.rx_bytes += len;

		handled++;
	}

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
	}

	/* Hardware could disable ring if it ran out of descriptors */
	bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);

	return handled;
}
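
/*
 * The handled < weight check implements the usual NAPI contract: consuming
 * less than the budget means the ring is idle, so polling is finished with
 * napi_complete_done() and the ring's interrupts are re-enabled. Using the
 * full budget keeps the poller scheduled without touching the interrupt
 * mask.
 */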

static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
{
	struct bcm4908_enet_dma_ring *tx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
	struct bcm4908_enet *enet = container_of(tx_ring, struct bcm4908_enet, tx_ring);
	struct bcm4908_enet_dma_ring_bd *buf_desc;
	struct bcm4908_enet_dma_ring_slot *slot;
	struct device *dev = enet->dev;
	unsigned int bytes = 0;
	int handled = 0;

	while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
		buf_desc = &tx_ring->buf_desc[tx_ring->read_idx];
		if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
			break;
		slot = &tx_ring->slots[tx_ring->read_idx];

		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
		dev_kfree_skb(slot->skb);

		handled++;
		bytes += slot->len;

		if (++tx_ring->read_idx == tx_ring->length)
			tx_ring->read_idx = 0;
	}

	netdev_completed_queue(enet->netdev, handled, bytes);
	enet->netdev->stats.tx_packets += handled;
	enet->netdev->stats.tx_bytes += bytes;

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
	}

	if (netif_queue_stopped(enet->netdev))
		netif_wake_queue(enet->netdev);

	return handled;
}

static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);

	bcm4908_enet_set_mtu(enet, new_mtu);

	return 0;
}

static const struct net_device_ops bcm4908_enet_netdev_ops = {
	.ndo_open = bcm4908_enet_open,
	.ndo_stop = bcm4908_enet_stop,
	.ndo_start_xmit = bcm4908_enet_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_change_mtu = bcm4908_enet_change_mtu,
};

static int bcm4908_enet_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct bcm4908_enet *enet;
	int err;

	netdev = devm_alloc_etherdev(dev, sizeof(*enet));
	if (!netdev)
		return -ENOMEM;

	enet = netdev_priv(netdev);
	enet->dev = dev;
	enet->netdev = netdev;

	enet->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(enet->base)) {
		dev_err(dev, "Failed to map registers: %ld\n", PTR_ERR(enet->base));
		return PTR_ERR(enet->base);
	}

	netdev->irq = platform_get_irq_byname(pdev, "rx");
	if (netdev->irq < 0)
		return netdev->irq;

	enet->irq_tx = platform_get_irq_byname(pdev, "tx");

	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = bcm4908_enet_dma_alloc(enet);
	if (err)
		return err;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	err = of_get_ethdev_address(dev->of_node, netdev);
	if (err == -EPROBE_DEFER)
		goto err_dma_free;
	if (err)
		eth_hw_addr_random(netdev);
	netdev->netdev_ops = &bcm4908_enet_netdev_ops;
	netdev->min_mtu = ETH_ZLEN;
	netdev->mtu = ETH_DATA_LEN;
	netdev->max_mtu = ENET_MTU_MAX;
	netif_napi_add_tx(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx);
	netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx);

	err = register_netdev(netdev);
	if (err)
		goto err_dma_free;

	platform_set_drvdata(pdev, enet);

	return 0;

err_dma_free:
	bcm4908_enet_dma_free(enet);

	return err;
}

static void bcm4908_enet_remove(struct platform_device *pdev)
{
	struct bcm4908_enet *enet = platform_get_drvdata(pdev);

	unregister_netdev(enet->netdev);
	netif_napi_del(&enet->rx_ring.napi);
	netif_napi_del(&enet->tx_ring.napi);
	bcm4908_enet_dma_free(enet);
}

static const struct of_device_id bcm4908_enet_of_match[] = {
	{ .compatible = "brcm,bcm4908-enet"},
	{},
};

static struct platform_driver bcm4908_enet_driver = {
	.driver = {
		.name = "bcm4908_enet",
		.of_match_table = bcm4908_enet_of_match,
	},
	.probe = bcm4908_enet_probe,
	.remove = bcm4908_enet_remove,
};
module_platform_driver(bcm4908_enet_driver);

MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);