/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"2.0"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
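
/* Editor's note (not from the original sources): a worked example of the
 * ring accounting above.  With B44_TX_RING_SIZE = 512 and the default
 * tx_pending = 511, TX_RING_GAP() is 1.  If tx_cons = 10 and tx_prod = 500,
 * the first branch of TX_BUFFS_AVAIL() gives 10 + 511 - 500 = 21 free
 * slots; once tx_prod wraps around to 5, the second branch gives
 * 10 - 5 - 1 = 4.  NEXT_TX() relies on the ring size being a power of two,
 * so "+1 modulo 512" reduces to a mask with 511 (0x1ff).
 */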

#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)
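
/* Editor's note: RX_HEADER_LEN comes from b44.h (28 bytes for this
 * hardware), so RX_PKT_OFFSET works out to 30: the chip-written header
 * plus 2 bytes of padding that keep the IP header 4-byte aligned after
 * the 14-byte Ethernet header.
 */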

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	ssb_dma_sync_single_range_for_device(sdev, dma_base,
					     offset & dma_desc_align_mask,
					     dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
					  offset & dma_desc_align_mask,
					  dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name,
		       bit, reg,
		       (clear ? "clear" : "set"));
		return -ENODEV;
	}
	return 0;
}
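
/* Editor's note: with udelay(10) per iteration, the "timeout" argument is
 * roughly in units of 10 microseconds; the callers below that pass 100
 * therefore poll for about 1 ms before declaring the bit stuck.
 */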

static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) << 8;
	val |= ((u32) data[5]) << 0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
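
/* Editor's note: the CAM packs a MAC address across two registers.  For
 * 00:11:22:33:44:55 the write above produces CAM_DATA_LO = 0x22334455
 * (bytes 2..5) and CAM_DATA_HI = CAM_DATA_HI_VALID | 0x0011 (bytes 0..1).
 */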

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
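
/* Editor's note: both MDIO helpers above assemble a single clause-22
 * management frame in the MDIO_DATA register (start bits, opcode, PHY
 * address, register address, turnaround, and for writes the 16-bit
 * payload), then poll EMAC_INT_MII to learn when the serial transaction
 * has finished.
 */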

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
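
/* Editor's note: pause_enab starts out empty, so autonegotiation enables
 * flow control in exactly one combination: we advertise
 * PAUSE_CAP+PAUSE_ASYM and the partner advertises asymmetric pause only
 * (LPA_PAUSE_ASYM set, LPA_PAUSE_CAP clear), i.e. it sends pause frames
 * without honoring ours.  Only then is B44_FLAG_RX_PAUSE set; every other
 * combination leaves flow control off, per the comment above.
 */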

#ifdef CONFIG_SSB_DRIVER_MIPS
extern char *nvram_get(char *name);
static void b44_wap54g10_workaround(struct b44 *bp)
{
	const char *str;
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	str = nvram_get("boardnum");
	if (!str)
		return;
	if (simple_strtoul(str, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		bp->flags |= B44_FLAG_FULL_DUPLEX;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			val |= TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}

static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		ssb_dma_unmap_single(bp->sdev,
				     rp->mapping,
				     skb->len,
				     DMA_TO_DEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
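
/* Editor's sketch of the resulting buffer layout (not in the original):
 *
 *   skb->data ----> +--------------------+  <- DMA-mapped base
 *                   | struct rx_header   |  RX_HEADER_LEN bytes,
 *                   | + 2 pad bytes      |  written by the chip
 *   RX_PKT_OFFSET-> +--------------------+  <- frame data lands here
 *                   | Ethernet frame     |
 *                   +--------------------+
 *
 * The descriptor carries the base address; b44_rx() later does
 * skb_pull(skb, RX_PKT_OFFSET) to expose just the frame.
 */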
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = ssb_dma_map_single(bp->sdev, skb->data,
				     RX_PKT_BUF_SZ,
				     DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!ssb_dma_mapping_error(bp->sdev, mapping))
			ssb_dma_unmap_single(bp->sdev, mapping,
					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = ssb_dma_map_single(bp->sdev, skb->data,
					     RX_PKT_BUF_SZ,
					     DMA_FROM_DEVICE);
		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!ssb_dma_mapping_error(bp->sdev, mapping))
				ssb_dma_unmap_single(bp->sdev, mapping,
						     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}
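
/* Editor's note: the double allocation dance above exists because the
 * core can only DMA to the first 1GB (30 bits) of address space.  The
 * first netdev_alloc_skb() may land anywhere; if its mapping fails the
 * 30-bit check, the buffer is thrown away and reallocated with GFP_DMA,
 * which confines it to the low DMA zone.  When the low-memory buffer is
 * used, force_copybreak disables the zero-copy receive path so the
 * scarce buffer keeps being recycled instead of handed up the stack.
 */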

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
				       RX_PKT_BUF_SZ,
				       DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		ssb_dma_sync_single_for_cpu(bp->sdev, map,
					    RX_PKT_BUF_SZ,
					    DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			ssb_dma_unmap_single(bp->sdev, map,
					     skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
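
/* Editor's note: the copybreak logic above trades CPU for memory.  Frames
 * larger than RX_COPY_THRESHOLD are handed up in the original DMA buffer
 * and a fresh buffer is allocated in its place; smaller frames are copied
 * into a right-sized skb (reserving 2 bytes to align the IP header) and
 * the original buffer is recycled back into the ring untouched.
 */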

static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		unsigned long flags;

		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete(napi);
		b44_enable_ints(bp);
	}

	return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			printk(KERN_INFO "%s: late interrupt.\n", dev->name);
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		goto err_out;
	}

	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!ssb_dma_mapping_error(bp->sdev, mapping))
			ssb_dma_unmap_single(bp->sdev, mapping, len,
					     DMA_TO_DEVICE);

		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
					     len, DMA_TO_DEVICE);
		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!ssb_dma_mapping_error(bp->sdev, mapping))
				ssb_dma_unmap_single(bp->sdev, mapping,
						     len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}
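
/* Editor's note: the writes at the tail of b44_start_xmit() deal with
 * chip/bridge quirks.  B44_FLAG_BUGGY_TXPTR doubles the mailbox write
 * for cores that can miss a single one, and B44_FLAG_REORDER_BUG reads
 * the register back to flush posted writes through bridges that may
 * reorder them; on sane hardware both flags stay clear and only the
 * wmb() ordering the descriptor update before the doorbell matters.
 */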

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
				     DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
				     DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
					       DMA_TABLE_BYTES,
					       DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
					       DMA_TABLE_BYTES,
					       DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
						bp->rx_ring, bp->rx_ring_dma,
						GFP_KERNEL);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
						bp->tx_ring, bp->tx_ring_dma,
						GFP_KERNEL);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
						 DMA_TABLE_BYTES,
						 DMA_BIDIRECTIONAL);

		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to ssb_dma_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
						 DMA_TABLE_BYTES,
						 DMA_TO_DEVICE);

		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable PHY if we are doing a partial reset;
	 * we are probably going to power down
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	printk(KERN_INFO PFX "%s: powering down PHY\n", bp->dev->name);
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it.  This has to be done _after_ we shut down the PHY */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
1347 /* bp->lock is held. */
1348 static void __b44_set_mac_addr(struct b44 *bp)
1350 bw32(bp, B44_CAM_CTRL, 0);
1351 if (!(bp->dev->flags & IFF_PROMISC)) {
1352 u32 val;
1354 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1355 val = br32(bp, B44_CAM_CTRL);
1356 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1360 static int b44_set_mac_addr(struct net_device *dev, void *p)
1362 struct b44 *bp = netdev_priv(dev);
1363 struct sockaddr *addr = p;
1364 u32 val;
1366 if (netif_running(dev))
1367 return -EBUSY;
1369 if (!is_valid_ether_addr(addr->sa_data))
1370 return -EINVAL;
1372 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1374 spin_lock_irq(&bp->lock);
1376 val = br32(bp, B44_RXCONFIG);
1377 if (!(val & RXCONFIG_CAM_ABSENT))
1378 __b44_set_mac_addr(bp);
1380 spin_unlock_irq(&bp->lock);
1382 return 0;
1385 /* Called at device open time to get the chip ready for
1386 * packet processing. Invoked with bp->lock held.
1388 static void __b44_set_rx_mode(struct net_device *);
1389 static void b44_init_hw(struct b44 *bp, int reset_kind)
1391 u32 val;
1393 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1394 if (reset_kind == B44_FULL_RESET) {
1395 b44_phy_reset(bp);
1396 b44_setup_phy(bp);
1399 /* Enable CRC32, set proper LED modes and power on PHY */
1400 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1401 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1403 /* This sets the MAC address too. */
1404 __b44_set_rx_mode(bp->dev);
1406 /* MTU + eth header + possible VLAN tag + struct rx_header */
1407 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1408 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1410 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1411 if (reset_kind == B44_PARTIAL_RESET) {
1412 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1413 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1414 } else {
1415 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1416 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1417 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1418 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1419 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1421 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1422 bp->rx_prod = bp->rx_pending;
1424 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1427 val = br32(bp, B44_ENET_CTRL);
1428 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			len++;
			set_bit(len, (unsigned long *) pmask);
		}
	}
	return len - 1;
}
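
/* Editor's note: a magic packet is 6 bytes of 0xFF followed by the MAC
 * address repeated 16 times.  Starting at "offset" (the length of the
 * assumed encapsulating header), the code above writes the 0xFF sync
 * stream and then as many whole or partial MAC repetitions as fit in the
 * 128-byte (B44_PATTERN_SIZE) filter window, setting the corresponding
 * bits in the match mask; the return value is the length encoding the
 * hardware expects, i.e. one less than the bytes actually used.
 */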

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		printk(KERN_ERR PFX "Memory not available for WOL\n");
		return;
	}

	/* IPv4 magic packet pattern - pattern 0. */
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw Ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* IPv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct dev_mc_list *mclist;
	int i, num_ents;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (dev->mc_count > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)) {
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, 6);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats		= b44_get_stats,
	.ndo_set_multicast_list	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	static int b44_version_printed = 0;
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}
	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system.\n");
		goto err_out_powerdown;
	}
	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting.\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_powerdown;
	}

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
	       dev->name, dev->dev_addr);

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}

static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
		return rc;
	}

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

static inline int b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
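
	/* Editor's example (not from the original sources): on a CPU with
	 * 32-byte cache lines, dma_get_cache_alignment() returns 32, so
	 * dma_desc_align_mask becomes ~31 (0xffffffe0) and
	 * dma_desc_sync_size is max(32, sizeof(struct dma_desc)) = 32;
	 * the b44_sync_dma_desc_* helpers then sync the whole cache line
	 * containing a descriptor rather than the 8-byte descriptor alone.
	 */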

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);