2 * Copyright (c) 2008 Stefan Hajnoczi <stefanha@gmail.com>
3 * Copyright (c) 2008 Pantelis Koukousoulas <pktoss@gmail.com>
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation; either version 2 of the
8 * License, or any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * This driver is a port of the b44 linux driver version 1.01
21 * Copyright (c) 2002 David S. Miller <davem@redhat.com>
22 * Copyright (c) Pekka Pietikainen <pp@ee.oulu.fi>
23 * Copyright (C) 2006 Broadcom Corporation.
25 * Some ssb bits copied from version 2.0 of the b44 driver
26 * Copyright (c) Michael Buesch
28 * Copyright (c) a lot of people too. Please respect their work.
FILE_LICENCE ( GPL2_OR_LATER );
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <mii.h>
#include <gpxe/io.h>
#include <gpxe/pci.h>
#include <gpxe/iobuf.h>
#include <gpxe/malloc.h>
#include <gpxe/netdevice.h>
#include <gpxe/ethernet.h>
#include <gpxe/if_ether.h>
#include <gpxe/memmap.h>

#include "b44.h"
50 static inline int ring_next(int index
)
52 /* B44_RING_SIZE is a power of 2 :) */
53 return (index
+ 1) & (B44_RING_SIZE
- 1);
57 /* Memory-mapped I/O wrappers */
59 static inline u32
br32(const struct b44_private
*bp
, u32 reg
)
61 return readl(bp
->regs
+ reg
);
65 static inline void bw32(const struct b44_private
*bp
, u32 reg
, u32 val
)
67 writel(val
, bp
->regs
+ reg
);
71 static inline void bflush(const struct b44_private
*bp
, u32 reg
, u32 timeout
)
73 readl(bp
->regs
+ reg
);
78 #define VIRT_TO_B44(addr) ( virt_to_bus(addr) + SB_PCI_DMA )
82 * Return non-zero if the installed RAM is within
83 * the limit given and zero if it is outside.
84 * Hopefully will be removed soon.
86 int phys_ram_within_limit(u64 limit
)
88 struct memory_map memmap
;
89 struct memory_region
*highest
= NULL
;
92 highest
= &memmap
.regions
[memmap
.count
- 1];
94 return (highest
->end
< limit
);
99 * Ring cells waiting to be processed are between 'tx_cur' and 'pending'
100 * indexes in the ring.
102 static u32
pending_tx_index(struct b44_private
*bp
)
104 u32 pending
= br32(bp
, B44_DMATX_STAT
);
105 pending
&= DMATX_STAT_CDMASK
;
107 pending
/= sizeof(struct dma_desc
);
108 return pending
& (B44_RING_SIZE
- 1);
113 * Ring cells waiting to be processed are between 'rx_cur' and 'pending'
114 * indexes in the ring.
116 static u32
pending_rx_index(struct b44_private
*bp
)
118 u32 pending
= br32(bp
, B44_DMARX_STAT
);
119 pending
&= DMARX_STAT_CDMASK
;
121 pending
/= sizeof(struct dma_desc
);
122 return pending
& (B44_RING_SIZE
- 1);
127 * Wait until the given bit is set/cleared.
129 static int b44_wait_bit(struct b44_private
*bp
, unsigned long reg
, u32 bit
,
130 unsigned long timeout
, const int clear
)
134 for (i
= 0; i
< timeout
; i
++) {
135 u32 val
= br32(bp
, reg
);
137 if (clear
&& !(val
& bit
))
140 if (!clear
&& (val
& bit
))
153 * Sonics Silicon Backplane support. SSB is a mini-bus interconnecting
154 * so-called IP Cores. One of those cores implements the Fast Ethernet
155 * functionality and another one the PCI engine.
157 * You need to switch to the core you want to talk to before actually
160 * See: http://bcm-v4.sipsolutions.net/Backplane for (reverse-engineered)
164 static inline u32
ssb_get_core_rev(struct b44_private
*bp
)
166 return (br32(bp
, B44_SBIDHIGH
) & SBIDHIGH_RC_MASK
);
170 static inline int ssb_is_core_up(struct b44_private
*bp
)
172 return ((br32(bp
, B44_SBTMSLOW
) & (SSB_CORE_DOWN
| SBTMSLOW_CLOCK
))
177 static u32
ssb_pci_setup(struct b44_private
*bp
, u32 cores
)
179 u32 bar_orig
, pci_rev
, val
;
181 pci_read_config_dword(bp
->pci
, SSB_BAR0_WIN
, &bar_orig
);
182 pci_write_config_dword(bp
->pci
, SSB_BAR0_WIN
,
183 BCM4400_PCI_CORE_ADDR
);
184 pci_rev
= ssb_get_core_rev(bp
);
186 val
= br32(bp
, B44_SBINTVEC
);
188 bw32(bp
, B44_SBINTVEC
, val
);
190 val
= br32(bp
, SSB_PCI_TRANS_2
);
191 val
|= SSB_PCI_PREF
| SSB_PCI_BURST
;
192 bw32(bp
, SSB_PCI_TRANS_2
, val
);
194 pci_write_config_dword(bp
->pci
, SSB_BAR0_WIN
, bar_orig
);
200 static void ssb_core_disable(struct b44_private
*bp
)
202 if (br32(bp
, B44_SBTMSLOW
) & SBTMSLOW_RESET
)
205 bw32(bp
, B44_SBTMSLOW
, (SBTMSLOW_REJECT
| SBTMSLOW_CLOCK
));
206 b44_wait_bit(bp
, B44_SBTMSLOW
, SBTMSLOW_REJECT
, 100000, 0);
207 b44_wait_bit(bp
, B44_SBTMSHIGH
, SBTMSHIGH_BUSY
, 100000, 1);
209 bw32(bp
, B44_SBTMSLOW
, (SBTMSLOW_FGC
| SBTMSLOW_CLOCK
|
211 bflush(bp
, B44_SBTMSLOW
, 1);
213 bw32(bp
, B44_SBTMSLOW
, SSB_CORE_DOWN
);
214 bflush(bp
, B44_SBTMSLOW
, 1);
218 static void ssb_core_reset(struct b44_private
*bp
)
221 const u32 mask
= (SBTMSLOW_CLOCK
| SBTMSLOW_FGC
| SBTMSLOW_RESET
);
223 ssb_core_disable(bp
);
225 bw32(bp
, B44_SBTMSLOW
, mask
);
226 bflush(bp
, B44_SBTMSLOW
, 1);
228 /* Clear SERR if set, this is a hw bug workaround. */
229 if (br32(bp
, B44_SBTMSHIGH
) & SBTMSHIGH_SERR
)
230 bw32(bp
, B44_SBTMSHIGH
, 0);
232 val
= br32(bp
, B44_SBIMSTATE
);
233 if (val
& (SBIMSTATE_BAD
)) {
234 bw32(bp
, B44_SBIMSTATE
, val
& ~SBIMSTATE_BAD
);
237 bw32(bp
, B44_SBTMSLOW
, (SBTMSLOW_CLOCK
| SBTMSLOW_FGC
));
238 bflush(bp
, B44_SBTMSLOW
, 1);
240 bw32(bp
, B44_SBTMSLOW
, (SBTMSLOW_CLOCK
));
241 bflush(bp
, B44_SBTMSLOW
, 1);
246 * Driver helper functions
250 * Chip reset provides power to the b44 MAC & PCI cores, which
251 * is necessary for MAC register access. We only do a partial
252 * reset in case of transmit/receive errors (ISTAT_ERRORS) to
253 * avoid the chip being hung for an unnecessary long time in
256 * Called-by: b44_close, b44_halt, b44_inithw(b44_open), b44_probe
258 static void b44_chip_reset(struct b44_private
*bp
, int reset_kind
)
260 if (ssb_is_core_up(bp
)) {
261 bw32(bp
, B44_RCV_LAZY
, 0);
263 bw32(bp
, B44_ENET_CTRL
, ENET_CTRL_DISABLE
);
265 b44_wait_bit(bp
, B44_ENET_CTRL
, ENET_CTRL_DISABLE
, 200, 1);
267 bw32(bp
, B44_DMATX_CTRL
, 0);
269 bp
->tx_dirty
= bp
->tx_cur
= 0;
271 if (br32(bp
, B44_DMARX_STAT
) & DMARX_STAT_EMASK
)
272 b44_wait_bit(bp
, B44_DMARX_STAT
, DMARX_STAT_SIDLE
,
275 bw32(bp
, B44_DMARX_CTRL
, 0);
279 ssb_pci_setup(bp
, SBINTVEC_ENET0
);
284 /* Don't enable PHY if we are only doing a partial reset. */
285 if (reset_kind
== B44_CHIP_RESET_PARTIAL
)
288 /* Make PHY accessible. */
289 bw32(bp
, B44_MDIO_CTRL
,
290 (MDIO_CTRL_PREAMBLE
| (0x0d & MDIO_CTRL_MAXF_MASK
)));
291 bflush(bp
, B44_MDIO_CTRL
, 1);
293 /* Enable internal or external PHY */
294 if (!(br32(bp
, B44_DEVCTRL
) & DEVCTRL_IPP
)) {
295 bw32(bp
, B44_ENET_CTRL
, ENET_CTRL_EPSEL
);
296 bflush(bp
, B44_ENET_CTRL
, 1);
298 u32 val
= br32(bp
, B44_DEVCTRL
);
299 if (val
& DEVCTRL_EPR
) {
300 bw32(bp
, B44_DEVCTRL
, (val
& ~DEVCTRL_EPR
));
301 bflush(bp
, B44_DEVCTRL
, 100);
308 * called by b44_poll in the error path
310 static void b44_halt(struct b44_private
*bp
)
313 bw32(bp
, B44_IMASK
, 0);
314 bflush(bp
, B44_IMASK
, 1);
316 DBG("b44: powering down PHY\n");
317 bw32(bp
, B44_MAC_CTRL
, MAC_CTRL_PHY_PDOWN
);
320 * Now reset the chip, but without enabling
321 * the MAC&PHY part of it.
322 * This has to be done _after_ we shut down the PHY
324 b44_chip_reset(bp
, B44_CHIP_RESET_PARTIAL
);
330 * Called at device open time to get the chip ready for
333 * Called-by: b44_open
335 static void b44_init_hw(struct b44_private
*bp
, int reset_kind
)
338 #define CTRL_MASK (DMARX_CTRL_ENABLE | (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))
340 b44_chip_reset(bp
, B44_CHIP_RESET_FULL
);
341 if (reset_kind
== B44_FULL_RESET
) {
345 /* Enable CRC32, set proper LED modes and power on PHY */
346 bw32(bp
, B44_MAC_CTRL
, MAC_CTRL_CRC32_ENAB
| MAC_CTRL_PHY_LEDCTRL
);
347 bw32(bp
, B44_RCV_LAZY
, (1 << RCV_LAZY_FC_SHIFT
));
349 /* This sets the MAC address too. */
350 b44_set_rx_mode(bp
->netdev
);
352 /* MTU + eth header + possible VLAN tag + struct rx_header */
353 bw32(bp
, B44_RXMAXLEN
, B44_MAX_MTU
+ ETH_HLEN
+ 8 + RX_HEADER_LEN
);
354 bw32(bp
, B44_TXMAXLEN
, B44_MAX_MTU
+ ETH_HLEN
+ 8 + RX_HEADER_LEN
);
356 bw32(bp
, B44_TX_HIWMARK
, TX_HIWMARK_DEFLT
);
357 if (reset_kind
== B44_PARTIAL_RESET
) {
358 bw32(bp
, B44_DMARX_CTRL
, CTRL_MASK
);
360 bw32(bp
, B44_DMATX_CTRL
, DMATX_CTRL_ENABLE
);
361 bw32(bp
, B44_DMATX_ADDR
, VIRT_TO_B44(bp
->tx
));
363 bw32(bp
, B44_DMARX_CTRL
, CTRL_MASK
);
364 bw32(bp
, B44_DMARX_ADDR
, VIRT_TO_B44(bp
->rx
));
365 bw32(bp
, B44_DMARX_PTR
, B44_RX_RING_LEN_BYTES
);
367 bw32(bp
, B44_MIB_CTRL
, MIB_CTRL_CLR_ON_READ
);
370 val
= br32(bp
, B44_ENET_CTRL
);
371 bw32(bp
, B44_ENET_CTRL
, (val
| ENET_CTRL_ENABLE
));
376 /*** Management of ring descriptors ***/
379 static void b44_populate_rx_descriptor(struct b44_private
*bp
, u32 idx
)
381 struct rx_header
*rh
;
384 rh
= bp
->rx_iobuf
[idx
]->data
;
387 ctrl
= DESC_CTRL_LEN
& (RX_PKT_BUF_SZ
- RX_PKT_OFFSET
);
388 if (idx
== B44_RING_LAST
) {
389 ctrl
|= DESC_CTRL_EOT
;
391 addr
= VIRT_TO_B44(bp
->rx_iobuf
[idx
]->data
);
393 bp
->rx
[idx
].ctrl
= cpu_to_le32(ctrl
);
394 bp
->rx
[idx
].addr
= cpu_to_le32(addr
);
395 bw32(bp
, B44_DMARX_PTR
, idx
* sizeof(struct dma_desc
));
400 * Refill RX ring descriptors with buffers. This is needed
401 * because during rx we are passing ownership of descriptor
402 * buffers to the network stack.
404 static void b44_rx_refill(struct b44_private
*bp
, u32 pending
)
409 for (i
= pending
+ 1; i
!= bp
->rx_cur
; i
= ring_next(i
)) {
410 if (bp
->rx_iobuf
[i
] != NULL
)
413 bp
->rx_iobuf
[i
] = alloc_iob(RX_PKT_BUF_SZ
);
414 if (!bp
->rx_iobuf
[i
]) {
415 DBG("Refill rx ring failed!!\n");
419 b44_populate_rx_descriptor(bp
, i
);
424 static void b44_free_rx_ring(struct b44_private
*bp
)
429 for (i
= 0; i
< B44_RING_SIZE
; i
++) {
430 free_iob(bp
->rx_iobuf
[i
]);
431 bp
->rx_iobuf
[i
] = NULL
;
433 free_dma(bp
->rx
, B44_RX_RING_LEN_BYTES
);
439 static int b44_init_rx_ring(struct b44_private
*bp
)
441 b44_free_rx_ring(bp
);
443 bp
->rx
= malloc_dma(B44_RX_RING_LEN_BYTES
, B44_DMA_ALIGNMENT
);
447 memset(bp
->rx_iobuf
, 0, sizeof(bp
->rx_iobuf
));
449 bp
->rx_iobuf
[0] = alloc_iob(RX_PKT_BUF_SZ
);
450 b44_populate_rx_descriptor(bp
, 0);
451 b44_rx_refill(bp
, 0);
453 DBG("Init RX rings: rx=0x%08lx\n", VIRT_TO_B44(bp
->rx
));
458 static void b44_free_tx_ring(struct b44_private
*bp
)
461 free_dma(bp
->tx
, B44_TX_RING_LEN_BYTES
);
467 static int b44_init_tx_ring(struct b44_private
*bp
)
469 b44_free_tx_ring(bp
);
471 bp
->tx
= malloc_dma(B44_TX_RING_LEN_BYTES
, B44_DMA_ALIGNMENT
);
475 memset(bp
->tx
, 0, B44_TX_RING_LEN_BYTES
);
476 memset(bp
->tx_iobuf
, 0, sizeof(bp
->tx_iobuf
));
478 DBG("Init TX rings: tx=0x%08lx\n", VIRT_TO_B44(bp
->tx
));
483 /*** Interaction with the PHY ***/
486 static int b44_phy_read(struct b44_private
*bp
, int reg
, u32
* val
)
490 u32 arg1
= (MDIO_OP_READ
<< MDIO_DATA_OP_SHIFT
);
491 u32 arg2
= (bp
->phy_addr
<< MDIO_DATA_PMD_SHIFT
);
492 u32 arg3
= (reg
<< MDIO_DATA_RA_SHIFT
);
493 u32 arg4
= (MDIO_TA_VALID
<< MDIO_DATA_TA_SHIFT
);
494 u32 argv
= arg1
| arg2
| arg3
| arg4
;
496 bw32(bp
, B44_EMAC_ISTAT
, EMAC_INT_MII
);
497 bw32(bp
, B44_MDIO_DATA
, (MDIO_DATA_SB_START
| argv
));
498 err
= b44_wait_bit(bp
, B44_EMAC_ISTAT
, EMAC_INT_MII
, 100, 0);
499 *val
= br32(bp
, B44_MDIO_DATA
) & MDIO_DATA_DATA
;
505 static int b44_phy_write(struct b44_private
*bp
, int reg
, u32 val
)
507 u32 arg1
= (MDIO_OP_WRITE
<< MDIO_DATA_OP_SHIFT
);
508 u32 arg2
= (bp
->phy_addr
<< MDIO_DATA_PMD_SHIFT
);
509 u32 arg3
= (reg
<< MDIO_DATA_RA_SHIFT
);
510 u32 arg4
= (MDIO_TA_VALID
<< MDIO_DATA_TA_SHIFT
);
511 u32 arg5
= (val
& MDIO_DATA_DATA
);
512 u32 argv
= arg1
| arg2
| arg3
| arg4
| arg5
;
515 bw32(bp
, B44_EMAC_ISTAT
, EMAC_INT_MII
);
516 bw32(bp
, B44_MDIO_DATA
, (MDIO_DATA_SB_START
| argv
));
517 return b44_wait_bit(bp
, B44_EMAC_ISTAT
, EMAC_INT_MII
, 100, 0);
521 static int b44_phy_reset(struct b44_private
*bp
)
526 err
= b44_phy_write(bp
, MII_BMCR
, BMCR_RESET
);
531 err
= b44_phy_read(bp
, MII_BMCR
, &val
);
533 if (val
& BMCR_RESET
) {
543 * The BCM44xx CAM (Content Addressable Memory) stores the MAC
546 static void b44_cam_write(struct b44_private
*bp
, unsigned char *data
,
551 val
= ((u32
) data
[2]) << 24;
552 val
|= ((u32
) data
[3]) << 16;
553 val
|= ((u32
) data
[4]) << 8;
554 val
|= ((u32
) data
[5]) << 0;
555 bw32(bp
, B44_CAM_DATA_LO
, val
);
558 val
= (CAM_DATA_HI_VALID
|
559 (((u32
) data
[0]) << 8) | (((u32
) data
[1]) << 0));
561 bw32(bp
, B44_CAM_DATA_HI
, val
);
563 val
= CAM_CTRL_WRITE
| (index
<< CAM_CTRL_INDEX_SHIFT
);
564 bw32(bp
, B44_CAM_CTRL
, val
);
566 b44_wait_bit(bp
, B44_CAM_CTRL
, CAM_CTRL_BUSY
, 100, 1);
570 static void b44_set_mac_addr(struct b44_private
*bp
)
573 bw32(bp
, B44_CAM_CTRL
, 0);
574 b44_cam_write(bp
, bp
->netdev
->ll_addr
, 0);
575 val
= br32(bp
, B44_CAM_CTRL
);
576 bw32(bp
, B44_CAM_CTRL
, val
| CAM_CTRL_ENABLE
);
580 /* Read 128-bytes of EEPROM. */
581 static void b44_read_eeprom(struct b44_private
*bp
, u8
* data
)
584 u16
*ptr
= (u16
*) data
;
586 for (i
= 0; i
< 128; i
+= 2)
587 ptr
[i
/ 2] = cpu_to_le16(readw(bp
->regs
+ 4096 + i
));
591 static void b44_load_mac_and_phy_addr(struct b44_private
*bp
)
595 /* Load MAC address, note byteswapping */
596 b44_read_eeprom(bp
, &eeprom
[0]);
597 bp
->netdev
->hw_addr
[0] = eeprom
[79];
598 bp
->netdev
->hw_addr
[1] = eeprom
[78];
599 bp
->netdev
->hw_addr
[2] = eeprom
[81];
600 bp
->netdev
->hw_addr
[3] = eeprom
[80];
601 bp
->netdev
->hw_addr
[4] = eeprom
[83];
602 bp
->netdev
->hw_addr
[5] = eeprom
[82];
604 /* Load PHY address */
605 bp
->phy_addr
= eeprom
[90] & 0x1f;
609 static void b44_set_rx_mode(struct net_device
*netdev
)
611 struct b44_private
*bp
= netdev_priv(netdev
);
612 unsigned char zero
[6] = { 0, 0, 0, 0, 0, 0 };
616 val
= br32(bp
, B44_RXCONFIG
);
617 val
&= ~RXCONFIG_PROMISC
;
618 val
|= RXCONFIG_ALLMULTI
;
620 b44_set_mac_addr(bp
);
622 for (i
= 1; i
< 64; i
++)
623 b44_cam_write(bp
, zero
, i
);
625 bw32(bp
, B44_RXCONFIG
, val
);
626 val
= br32(bp
, B44_CAM_CTRL
);
627 bw32(bp
, B44_CAM_CTRL
, val
| CAM_CTRL_ENABLE
);
631 /*** Implementation of gPXE driver callbacks ***/
637 * @v id Matching entry in ID table
638 * @ret rc Return status code
640 static int b44_probe(struct pci_device
*pci
, const struct pci_device_id
*id
)
642 struct net_device
*netdev
;
643 struct b44_private
*bp
;
647 * Bail out if more than 1GB of physical RAM is installed.
648 * This limitation will be removed later when dma mapping
649 * is merged into mainline.
651 if (!phys_ram_within_limit(B44_30BIT_DMA_MASK
)) {
652 DBG("Sorry, this version of the driver does not\n"
653 "support systems with more than 1GB of RAM.\n");
658 netdev
= alloc_etherdev(sizeof(*bp
));
662 netdev_init(netdev
, &b44_operations
);
663 pci_set_drvdata(pci
, netdev
);
664 netdev
->dev
= &pci
->dev
;
666 /* Set up private data */
667 bp
= netdev_priv(netdev
);
668 memset(bp
, 0, sizeof(*bp
));
672 /* Map device registers */
673 bp
->regs
= ioremap(pci
->membase
, B44_REGS_SIZE
);
679 /* Enable PCI bus mastering */
680 adjust_pci_device(pci
);
682 b44_load_mac_and_phy_addr(bp
);
684 /* Link management currently not implemented */
685 netdev_link_up(netdev
);
687 rc
= register_netdev(netdev
);
694 b44_chip_reset(bp
, B44_CHIP_RESET_FULL
);
696 DBG("b44 %s (%04x:%04x) regs=%p MAC=%s\n", id
->name
, id
->vendor
,
697 id
->device
, bp
->regs
, eth_ntoa(netdev
->ll_addr
));
708 static void b44_remove(struct pci_device
*pci
)
710 struct net_device
*netdev
= pci_get_drvdata(pci
);
711 struct b44_private
*bp
= netdev_priv(netdev
);
713 ssb_core_disable(bp
);
714 unregister_netdev(netdev
);
716 netdev_nullify(netdev
);
721 /** Enable or disable interrupts
723 * @v netdev Network device
724 * @v enable Interrupts should be enabled
726 static void b44_irq(struct net_device
*netdev
, int enable
)
728 struct b44_private
*bp
= netdev_priv(netdev
);
730 /* Interrupt mask specifies which events generate interrupts */
731 bw32(bp
, B44_IMASK
, enable
? IMASK_DEF
: IMASK_DISABLE
);
735 /** Open network device
737 * @v netdev Network device
738 * @ret rc Return status code
740 static int b44_open(struct net_device
*netdev
)
742 struct b44_private
*bp
= netdev_priv(netdev
);
745 rc
= b44_init_tx_ring(bp
);
749 rc
= b44_init_rx_ring(bp
);
753 b44_init_hw(bp
, B44_FULL_RESET
);
755 /* Disable interrupts */
762 /** Close network device
764 * @v netdev Network device
766 static void b44_close(struct net_device
*netdev
)
768 struct b44_private
*bp
= netdev_priv(netdev
);
770 b44_chip_reset(bp
, B44_FULL_RESET
);
771 b44_free_tx_ring(bp
);
772 b44_free_rx_ring(bp
);
778 * @v netdev Network device
779 * @v iobuf I/O buffer
780 * @ret rc Return status code
782 static int b44_transmit(struct net_device
*netdev
, struct io_buffer
*iobuf
)
784 struct b44_private
*bp
= netdev_priv(netdev
);
785 u32 cur
= bp
->tx_cur
;
788 /* Check for TX ring overflow */
789 if (bp
->tx
[cur
].ctrl
) {
790 DBG("tx overflow\n");
794 /* Will call netdev_tx_complete() on the iobuf later */
795 bp
->tx_iobuf
[cur
] = iobuf
;
797 /* Set up TX descriptor */
798 ctrl
= (iob_len(iobuf
) & DESC_CTRL_LEN
) |
799 DESC_CTRL_IOC
| DESC_CTRL_SOF
| DESC_CTRL_EOF
;
801 if (cur
== B44_RING_LAST
)
802 ctrl
|= DESC_CTRL_EOT
;
804 bp
->tx
[cur
].ctrl
= cpu_to_le32(ctrl
);
805 bp
->tx
[cur
].addr
= cpu_to_le32(VIRT_TO_B44(iobuf
->data
));
807 /* Update next available descriptor index */
808 cur
= ring_next(cur
);
812 /* Tell card that a new TX descriptor is ready */
813 bw32(bp
, B44_DMATX_PTR
, cur
* sizeof(struct dma_desc
));
818 /** Recycles sent TX descriptors and notifies network stack
822 static void b44_tx_complete(struct b44_private
*bp
)
826 cur
= pending_tx_index(bp
);
828 for (i
= bp
->tx_dirty
; i
!= cur
; i
= ring_next(i
)) {
829 /* Free finished frame */
830 netdev_tx_complete(bp
->netdev
, bp
->tx_iobuf
[i
]);
831 bp
->tx_iobuf
[i
] = NULL
;
833 /* Clear TX descriptor */
841 static void b44_process_rx_packets(struct b44_private
*bp
)
843 struct io_buffer
*iob
; /* received data */
844 struct rx_header
*rh
;
848 pending
= pending_rx_index(bp
);
850 for (i
= bp
->rx_cur
; i
!= pending
; i
= ring_next(i
)) {
851 iob
= bp
->rx_iobuf
[i
];
856 len
= le16_to_cpu(rh
->len
);
859 * Guard against incompletely written RX descriptors.
860 * Without this, things can get really slow!
865 /* Discard CRC that is generated by the card */
868 /* Check for invalid packets and errors */
869 if (len
> RX_PKT_BUF_SZ
- RX_PKT_OFFSET
||
870 (rh
->flags
& cpu_to_le16(RX_FLAG_ERRORS
))) {
871 DBG("rx error len=%d flags=%04x\n", len
,
872 cpu_to_le16(rh
->flags
));
875 netdev_rx_err(bp
->netdev
, iob
, -EINVAL
);
879 /* Clear RX descriptor */
882 bp
->rx_iobuf
[i
] = NULL
;
884 /* Hand off the IO buffer to the network stack */
885 iob_reserve(iob
, RX_PKT_OFFSET
);
887 netdev_rx(bp
->netdev
, iob
);
890 b44_rx_refill(bp
, pending_rx_index(bp
));
894 /** Poll for completed and received packets
896 * @v netdev Network device
898 static void b44_poll(struct net_device
*netdev
)
900 struct b44_private
*bp
= netdev_priv(netdev
);
903 /* Interrupt status */
904 istat
= br32(bp
, B44_ISTAT
);
905 istat
&= IMASK_DEF
; /* only the events we care about */
909 if (istat
& ISTAT_TX
)
911 if (istat
& ISTAT_RX
)
912 b44_process_rx_packets(bp
);
913 if (istat
& ISTAT_ERRORS
) {
914 DBG("b44 error istat=0x%08x\n", istat
);
916 /* Reset B44 core partially to avoid long waits */
917 b44_irq(bp
->netdev
, 0);
919 b44_init_tx_ring(bp
);
920 b44_init_rx_ring(bp
);
921 b44_init_hw(bp
, B44_FULL_RESET_SKIP_PHY
);
924 /* Acknowledge interrupt */
925 bw32(bp
, B44_ISTAT
, 0);
926 bflush(bp
, B44_ISTAT
, 1);
930 static struct net_device_operations b44_operations
= {
933 .transmit
= b44_transmit
,
939 static struct pci_device_id b44_nics
[] = {
940 PCI_ROM(0x14e4, 0x4401, "BCM4401", "BCM4401", 0),
941 PCI_ROM(0x14e4, 0x170c, "BCM4401-B0", "BCM4401-B0", 0),
942 PCI_ROM(0x14e4, 0x4402, "BCM4401-B1", "BCM4401-B1", 0),
946 struct pci_driver b44_driver __pci_driver
= {
948 .id_count
= sizeof b44_nics
/ sizeof b44_nics
[0],
950 .remove
= b44_remove
,