/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bgmac.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>
static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	{},
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	pr_err("Timeout waiting for reg 0x%X\n", reg);
	return false;
}
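
/* Note: the helper above polls in roughly 10 microsecond steps, so the
 * timeout argument is effectively expressed in microseconds.
 */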
/**************************************************
 * DMA
 **************************************************/
static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value() doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		udelay(300);
	}

	val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
		bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
			  ring->mmio_base);
}
static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->core->id.rev >= 4) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	netdev_sent_queue(net_dev, skb->len);

	/* Increase ring->end to point to the empty slot. We tell hardware the
	 * first slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting bugged calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);
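	/* The status register reports a byte offset within the descriptor
	 * ring, so the value is masked, rebased by ring->index_base and
	 * divided by the descriptor size to get a slot index that can be
	 * compared with ring->start below.
	 */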
	while (ring->start != empty_slot) {
		struct bgmac_slot_info *slot = &ring->slots[ring->start];

		if (slot->skb) {
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr,
					 slot->skb->len, DMA_TO_DEVICE);

			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		} else {
			bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
				  ring->start, ring->end);
		}

		if (++ring->start >= BGMAC_TX_RING_SLOTS)
			ring->start = 0;
		freed = true;
	}

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (freed && netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}
static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
			  ring->mmio_base);
}
static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
	if (bgmac->core->id.rev >= 4) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}
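
/* BGMAC_RX_FRAME_OFFSET programmed above is the same offset that
 * bgmac_dma_rx_read() later skips with skb_pull(): the received frame starts
 * that many bytes after the struct bgmac_rx_header at the head of the buffer.
 */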
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;

	skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
	if (!skb)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = (struct bgmac_rx_header *)skb->data;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	dma_addr = dma_map_single(dma_dev, skb->data,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		bgmac_err(bgmac, "DMA mapping error\n");
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	/* Update the slot */
	slot->skb = skb;
	slot->dma_addr = dma_addr;

	if (slot->dma_addr & 0xC0000000)
		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

	return 0;
}
static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}
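
/* Each descriptor carries the buffer's DMA address split across addr_low and
 * addr_high plus two control words; BGMAC_DESC_CTL0_EOT on the last slot
 * marks the end of the table so the engine wraps back to the ring base.
 */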
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	ring->end = end_slot;

	while (ring->start != ring->end) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct sk_buff *skb = slot->skb;
		struct bgmac_rx_header *rx;
		u16 len, flags;

		/* Unmap buffer to make it accessible to the CPU */
		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		/* Get info from the header */
		rx = (struct bgmac_rx_header *)skb->data;
		len = le16_to_cpu(rx->len);
		flags = le16_to_cpu(rx->flags);

		do {
			dma_addr_t old_dma_addr = slot->dma_addr;
			int err;

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}

			/* Prepare new skb as replacement */
			err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
			if (err) {
				/* Poison the old skb */
				rx->len = cpu_to_le16(0xdead);
				rx->flags = cpu_to_le16(0xbeef);

				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}
			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

			/* Unmap old skb, we'll pass it to the netif stack */
			dma_unmap_single(dma_dev, old_dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			netif_receive_skb(skb);
			handled++;
		} while (0);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	return handled;
}
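
/* The helper below probes for unaligned-addressing support by writing a value
 * with non-zero low bits to the ring base register and reading it back; if
 * the low bits are preserved, descriptor rings do not have to be aligned and
 * the index registers are later rebased by ring->index_base.
 */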
/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}
static void bgmac_dma_ring_free(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_slot_info *slot;
	int size;
	int i;

	for (i = 0; i < ring->num_slots; i++) {
		slot = &ring->slots[i];
		if (slot->skb) {
			if (slot->dma_addr)
				dma_unmap_single(dma_dev, slot->dma_addr,
						 slot->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(slot->skb);
		}
	}

	if (ring->cpu_base) {
		/* Free ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		dma_free_coherent(dma_dev, size, ring->cpu_base,
				  ring->dma_base);
	}
}
static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
}
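
/* TX and RX rings share the same four register blocks (BGMAC_DMA_BASE0..3).
 * bgmac_dma_alloc() below allocates the descriptor memory coherently and,
 * for RX rings, also fills every slot with an skb up front.
 */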
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->num_slots = BGMAC_TX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];
		ring->num_slots = BGMAC_RX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		for (j = 0; j < ring->num_slots; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err) {
				bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
				goto err_dma_free;
			}
		}
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}
static void bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points to the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		for (j = 0; j < ring->num_slots; j++)
			bgmac_dma_rx_setup_desc(bgmac, ring, j);

		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
			    ring->index_base +
			    ring->num_slots * sizeof(struct bgmac_dma_desc));
	}
}
/**************************************************
 * PHY ops
 **************************************************/
static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	tmp = BGMAC_PA_START;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return 0xffff;
	}

	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
}
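
/* An MDIO transaction on this hardware is started by writing BGMAC_PA_START
 * together with the PHY address, register number (and, for writes, the data)
 * to the PHY access register, then polling until the START bit clears; for a
 * read the result is then in the BGMAC_PA_DATA_MASK field of that register.
 */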
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
		bgmac_warn(bgmac, "Error setting MDIO int\n");

	tmp = BGMAC_PA_START;
	tmp |= BGMAC_PA_WRITE;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	tmp |= value;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return -ETIMEDOUT;
	}

	return 0;
}
709 static void bgmac_phy_init(struct bgmac
*bgmac
)
711 struct bcma_chipinfo
*ci
= &bgmac
->core
->bus
->chipinfo
;
712 struct bcma_drv_cc
*cc
= &bgmac
->core
->bus
->drv_cc
;
715 if (ci
->id
== BCMA_CHIP_ID_BCM5356
) {
716 for (i
= 0; i
< 5; i
++) {
717 bgmac_phy_write(bgmac
, i
, 0x1f, 0x008b);
718 bgmac_phy_write(bgmac
, i
, 0x15, 0x0100);
719 bgmac_phy_write(bgmac
, i
, 0x1f, 0x000f);
720 bgmac_phy_write(bgmac
, i
, 0x12, 0x2aaa);
721 bgmac_phy_write(bgmac
, i
, 0x1f, 0x000b);
724 if ((ci
->id
== BCMA_CHIP_ID_BCM5357
&& ci
->pkg
!= 10) ||
725 (ci
->id
== BCMA_CHIP_ID_BCM4749
&& ci
->pkg
!= 10) ||
726 (ci
->id
== BCMA_CHIP_ID_BCM53572
&& ci
->pkg
!= 9)) {
727 bcma_chipco_chipctl_maskset(cc
, 2, ~0xc0000000, 0);
728 bcma_chipco_chipctl_maskset(cc
, 4, ~0x80000000, 0);
729 for (i
= 0; i
< 5; i
++) {
730 bgmac_phy_write(bgmac
, i
, 0x1f, 0x000f);
731 bgmac_phy_write(bgmac
, i
, 0x16, 0x5284);
732 bgmac_phy_write(bgmac
, i
, 0x1f, 0x000b);
733 bgmac_phy_write(bgmac
, i
, 0x17, 0x0010);
734 bgmac_phy_write(bgmac
, i
, 0x1f, 0x000f);
735 bgmac_phy_write(bgmac
, i
, 0x16, 0x5296);
736 bgmac_phy_write(bgmac
, i
, 0x17, 0x1073);
737 bgmac_phy_write(bgmac
, i
, 0x17, 0x9073);
738 bgmac_phy_write(bgmac
, i
, 0x16, 0x52b6);
739 bgmac_phy_write(bgmac
, i
, 0x17, 0x9273);
740 bgmac_phy_write(bgmac
, i
, 0x1f, 0x000b);
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static void bgmac_phy_reset(struct bgmac *bgmac)
{
	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
	if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
		bgmac_err(bgmac, "PHY reset failed\n");
	bgmac_phy_init(bgmac);
}
/**************************************************
 * Chip ops
 **************************************************/
/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
}
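
/* All CMDCFG updates go through the helper above, which brackets the change
 * with the BGMAC_CMDCFG_SR (software reset) bit so the MAC is quiescent while
 * bits like TX/RX enable or promiscuous mode are flipped.
 */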
static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}
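
/* The address is programmed most-significant-byte first: bytes 0-3 go into
 * BGMAC_MACADDR_HIGH and bytes 4-5 into BGMAC_MACADDR_LOW.
 */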
static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}
#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif
static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}
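
/* BGMAC_DC_MROR is presumably a clear-on-read mode for the MIB block: with it
 * set, simply reading every TX/RX counter above is enough to zero the
 * statistics.
 */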
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	case SPEED_2500:
		set |= BGMAC_CMDCFG_ES_2500;
		break;
	default:
		bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}
static void bgmac_miiconfig(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_chipinfo *ci = &core->bus->chipinfo;
	u8 imode;

	if (ci->id == BCMA_CHIP_ID_BCM4707 ||
	    ci->id == BCMA_CHIP_ID_BCM53018) {
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) | 0x40 |
			      BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			 BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags = 0;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
	if (ci->id != BCMA_CHIP_ID_BCM4707) {
		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bcma_core_enable(core, flags);
	}

	/* Request Misc PLL for corerev > 2 */
	if (core->id.rev > 2 &&
	    ci->id != BCMA_CHIP_ID_BCM4707 &&
	    ci->id != BCMA_CHIP_ID_BCM53018) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 ||
	    ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
			   (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't say anything about using BGMAC_CMDCFG_SR, but in this
	 * routine BGMAC_CMDCFG is read _after_ putting the chip in a reset.
	 * So it has to be kept until taking the MAC out of the reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR(core->id.rev),
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	netdev_reset_queue(bgmac->net_dev);

	bgmac->int_status = 0;
}
static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	u32 cmdcfg, mode, rxq_ctl;
	u32 fl_ctl;
	u16 bp_clk;
	u8 mdp;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
	       BGMAC_DS_MM_SHIFT;
	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	switch (ci->id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
		fl_ctl = 0x03cb04cb;
		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
		    ci->id == BCMA_CHIP_ID_BCM4749 ||
		    ci->id == BCMA_CHIP_ID_BCM53572)
			fl_ctl = 0x2300e1;
		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
		break;
	}

	if (ci->id != BCMA_CHIP_ID_BCM4707 &&
	    ci->id != BCMA_CHIP_ID_BCM53018) {
		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
			 1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
{
	struct bgmac_dma_ring *ring;
	int i;

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	if (full_init) {
		bgmac_dma_init(bgmac);
		if (1) /* FIXME: is there any case we don't want IRQs? */
			bgmac_chip_intrs_on(bgmac);
	} else {
		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
			ring = &bgmac->rx_ring[i];
			bgmac_dma_rx_enable(bgmac, ring);
		}
	}

	bgmac_enable(bgmac);
}
static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	bgmac->int_status = int_status;

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	struct bgmac_dma_ring *ring;
	int handled = 0;

	if (bgmac->int_status & BGMAC_IS_TX0) {
		ring = &bgmac->tx_ring[0];
		bgmac_dma_tx_free(bgmac, ring);
		bgmac->int_status &= ~BGMAC_IS_TX0;
	}

	if (bgmac->int_status & BGMAC_IS_RX) {
		ring = &bgmac->rx_ring[0];
		handled += bgmac_dma_rx_read(bgmac, ring, weight);
		bgmac->int_status &= ~BGMAC_IS_RX;
	}

	if (bgmac->int_status) {
		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
		bgmac->int_status = 0;
	}

	if (handled < weight)
		napi_complete(napi);

	bgmac_chip_intrs_on(bgmac);

	return handled;
}
/**************************************************
 * net_device_ops
 **************************************************/
static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err;

	bgmac_chip_reset(bgmac);
	/* Specs talk about reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac, true);

	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(bgmac->phy_dev);

	netif_carrier_on(net_dev);

	return 0;
}
static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(bgmac->phy_dev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->core->irq, net_dev);

	bgmac_chip_reset(bgmac);

	return 0;
}
static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}
static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;
	bgmac_write_mac_address(bgmac, (u8 *)addr);
	eth_commit_mac_addr_change(net_dev, addr);

	return 0;
}
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
}
static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};
/**************************************************
 * ethtool_ops
 **************************************************/
static int bgmac_get_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return phy_ethtool_gset(bgmac->phy_dev, cmd);
}

static int bgmac_set_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return phy_ethtool_sset(bgmac->phy_dev, cmd);
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_settings	= bgmac_get_settings,
	.set_settings	= bgmac_set_settings,
	.get_drvinfo	= bgmac_get_drvinfo,
};
/**************************************************
 * MII
 **************************************************/
static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return bgmac_phy_read(bus->priv, mii_id, regnum);
}

static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}
static void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = bgmac->phy_dev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}
static int bgmac_mii_register(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	int i;
	int err = 0;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "bgmac mii bus";
	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
		bgmac->core->core_unit);
	mii_bus->priv = bgmac;
	mii_bus->read = bgmac_mii_read;
	mii_bus->write = bgmac_mii_write;
	mii_bus->parent = &bgmac->core->dev;
	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);

	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (!mii_bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(mii_bus);
	if (err) {
		bgmac_err(bgmac, "Registration of mii bus failed\n");
		goto err_free_irq;
	}

	bgmac->mii_bus = mii_bus;

	/* Connect to the PHY */
	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
		 bgmac->phyaddr);
	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
			      PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		bgmac_err(bgmac, "PHY connection failed\n");
		err = PTR_ERR(phy_dev);
		goto err_unregister_bus;
	}
	bgmac->phy_dev = phy_dev;

	return err;

err_unregister_bus:
	mdiobus_unregister(mii_bus);
err_free_irq:
	kfree(mii_bus->irq);
err_free_bus:
	mdiobus_free(mii_bus);
	return err;
}
static void bgmac_mii_unregister(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus = bgmac->mii_bus;

	mdiobus_unregister(mii_bus);
	kfree(mii_bus->irq);
	mdiobus_free(mii_bus);
}
/**************************************************
 * BCMA bus ops
 **************************************************/
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	struct ssb_sprom *sprom = &core->bus->sprom;
	u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
	int err;

	/* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
	if (core->core_unit > 1) {
		pr_err("Unsupported core_unit %d\n", core->core_unit);
		return -ENOTSUPP;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
		eth_random_addr(mac);
		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
	}

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;
	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->irq = core->irq;
	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
	bgmac = netdev_priv(net_dev);
	bgmac->net_dev = net_dev;
	bgmac->core = core;
	bcma_set_drvdata(core, bgmac);

	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);

	/* On BCM4706 we need common core to access PHY */
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
	    !core->bus->drv_gmac_cmn.core) {
		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac->cmn = core->bus->drv_gmac_cmn.core;

	bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
			 sprom->et0phyaddr;
	bgmac->phyaddr &= BGMAC_PHY_MASK;
	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
		bgmac_err(bgmac, "No PHY found\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");

	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
		bgmac_err(bgmac, "PCI setup not implemented\n");
		err = -ENOTSUPP;
		goto err_netdev_free;
	}

	bgmac_chip_reset(bgmac);

	/* For Northstar, we have to take all GMAC cores out of reset */
	if (core->id.id == BCMA_CHIP_ID_BCM4707 ||
	    core->id.id == BCMA_CHIP_ID_BCM53018) {
		struct bcma_device *ns_core;
		int ns_gmac;

		/* Northstar has 4 GMAC cores */
		for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
			/* As a Northstar requirement, we have to reset all
			 * GMACs before accessing any of them.
			 * bgmac_chip_reset() calls bcma_core_enable() for this
			 * core only, so the other three GMACs don't get reset
			 * there. We do it here.
			 */
			ns_core = bcma_find_core_unit(core->bus,
						      BCMA_CORE_MAC_GBIT,
						      ns_gmac);
			if (ns_core && !bcma_core_is_enabled(ns_core))
				bcma_core_enable(ns_core, 0);
		}
	}

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	/* TODO: reset the external phy. Specs are needed */
	bgmac_phy_reset(bgmac);

	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
			       BGMAC_BFL_ENETROBO);
	if (bgmac->has_robosw)
		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");

	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");

	err = bgmac_mii_register(bgmac);
	if (err) {
		bgmac_err(bgmac, "Cannot register MDIO\n");
		goto err_dma_free;
	}

	err = register_netdev(bgmac->net_dev);
	if (err) {
		bgmac_err(bgmac, "Cannot register net device\n");
		goto err_mii_unregister;
	}

	netif_carrier_off(net_dev);

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	return 0;

err_mii_unregister:
	bgmac_mii_unregister(bgmac);
err_dma_free:
	bgmac_dma_free(bgmac);
err_netdev_free:
	bcma_set_drvdata(core, NULL);
	free_netdev(net_dev);

	return err;
}
static void bgmac_remove(struct bcma_device *core)
{
	struct bgmac *bgmac = bcma_get_drvdata(core);

	netif_napi_del(&bgmac->napi);
	unregister_netdev(bgmac->net_dev);
	bgmac_mii_unregister(bgmac);
	bgmac_dma_free(bgmac);
	bcma_set_drvdata(core, NULL);
	free_netdev(bgmac->net_dev);
}
static struct bcma_driver bgmac_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= bgmac_bcma_tbl,
	.probe		= bgmac_probe,
	.remove		= bgmac_remove,
};
static int __init bgmac_init(void)
{
	int err;

	err = bcma_driver_register(&bgmac_bcma_driver);
	if (err)
		return err;
	pr_info("Broadcom 47xx GBit MAC driver loaded\n");

	return 0;
}

static void __exit bgmac_exit(void)
{
	bcma_driver_unregister(&bgmac_bcma_driver);
}

module_init(bgmac_init)
module_exit(bgmac_exit)

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");