/* $NetBSD: if_bce.c,v 1.28 2009/11/26 15:17:09 njoly Exp $	 */

/*
 * Copyright (c) 2003 Clifford Wright. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Broadcom BCM440x 10/100 ethernet (broadcom.com)
 * SiliconBackplane is technology from Sonics, Inc. (sonicsinc.com)
 *
 * Cliff Wright cliff@snipe444.org
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bce.c,v 1.28 2009/11/26 15:17:09 njoly Exp $");
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bcereg.h>

#include <uvm/uvm_extern.h>
/* transmit buffer max frags allowed */
#define BCE_NTXFRAGS	16

/* ring descriptor */
struct bce_dma_slot {
	uint32_t ctrl;
	uint32_t addr;
};
#define CTRL_BC_MASK	0x1fff		/* buffer byte count */
#define CTRL_EOT	0x10000000	/* end of descriptor table */
#define CTRL_IOC	0x20000000	/* interrupt on completion */
#define CTRL_EOF	0x40000000	/* end of frame */
#define CTRL_SOF	0x80000000	/* start of frame */
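/*
 * Illustrative sketch, not part of the original driver: how a control
 * word for a single-segment transmit descriptor would be composed from
 * the flags above.  The helper name and "is_last_slot" parameter are
 * hypothetical; bce_start() below performs the equivalent steps inline.
 */
#if 0
static uint32_t
bce_example_txctrl(uint32_t len, int is_last_slot)
{
	uint32_t ctrl = len & CTRL_BC_MASK;	/* buffer byte count */

	ctrl |= CTRL_SOF | CTRL_EOF;	/* lone segment starts and ends frame */
	ctrl |= CTRL_IOC;		/* interrupt on completion */
	if (is_last_slot)
		ctrl |= CTRL_EOT;	/* last entry of the descriptor table */
	return ctrl;
}
#endif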
/* Packet status is returned in a pre-packet header */
struct rx_pph {
	uint16_t len;
	uint16_t flags;
	uint16_t pad[12];
};

/* packet status flags bits */
#define RXF_NO		0x8	/* odd number of nibbles */
#define RXF_RXER	0x4	/* receive symbol error */
#define RXF_CRC		0x2	/* crc error */
#define RXF_OV		0x1	/* fifo overflow */
/* number of descriptors used in a ring */
#define BCE_NRXDESC	128
#define BCE_NTXDESC	128
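/*
 * Illustrative check, not part of the original driver: each bce_dma_slot
 * is two 32-bit words, so a 128-entry ring occupies 128 * 8 = 1024 bytes
 * and fits in the single 4KB window the chip requires per ring (see the
 * ring allocation in bce_attach() below).
 */
#if 0
/* compile-time sketch: array size goes negative if a ring outgrows a page */
typedef char bce_rx_ring_fits_page
    [(BCE_NRXDESC * sizeof(struct bce_dma_slot) <= 4096) ? 1 : -1];
#endif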
/*
 * Mbuf pointers. We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from physical to virtual,
 * not the other way around.
 */
struct bce_chain_data {
	struct mbuf	*bce_tx_chain[BCE_NTXDESC];
	struct mbuf	*bce_rx_chain[BCE_NRXDESC];
	bus_dmamap_t	bce_tx_map[BCE_NTXDESC];
	bus_dmamap_t	bce_rx_map[BCE_NRXDESC];
};
#define BCE_TIMEOUT	100	/* # 10us for mii read/write */

struct bce_softc {
	struct device		bce_dev;
	bus_space_tag_t		bce_btag;
	bus_space_handle_t	bce_bhandle;
	bus_dma_tag_t		bce_dmatag;
	struct ethercom		ethercom;	/* interface info */
	void			*bce_intrhand;
	struct pci_attach_args	bce_pa;
	struct mii_data		bce_mii;
	uint32_t		bce_phy;	/* eeprom indicated phy */
	struct ifmedia		bce_ifmedia;	/* media info */
	uint8_t			enaddr[ETHER_ADDR_LEN];
	struct bce_dma_slot	*bce_rx_ring;	/* receive ring */
	struct bce_dma_slot	*bce_tx_ring;	/* transmit ring */
	struct bce_chain_data	bce_cdata;	/* mbufs */
	bus_dmamap_t		bce_ring_map;
	uint32_t		bce_intmask;	/* current intr mask */
	uint32_t		bce_rxin;	/* last rx descriptor seen */
	uint32_t		bce_txin;	/* last tx descriptor seen */
	int			bce_txsfree;	/* no. tx slots available */
	int			bce_txsnext;	/* next available tx slot */
	callout_t		bce_timeout;
#if NRND > 0
	rndsource_element_t	rnd_source;
#endif
};
/* for ring descriptors */
#define BCE_RXBUF_LEN	(MCLBYTES - 4)
#define BCE_INIT_RXDESC(sc, x)						\
do {									\
	struct bce_dma_slot *__bced = &sc->bce_rx_ring[x];		\
									\
	*mtod(sc->bce_cdata.bce_rx_chain[x], uint32_t *) = 0;		\
	__bced->addr =							\
	    htole32(sc->bce_cdata.bce_rx_map[x]->dm_segs[0].ds_addr	\
	    + 0x40000000);						\
	if (x != (BCE_NRXDESC - 1))					\
		__bced->ctrl = htole32(BCE_RXBUF_LEN);			\
	else								\
		__bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);	\
	bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,		\
	    sizeof(struct bce_dma_slot) * x,				\
	    sizeof(struct bce_dma_slot),				\
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);			\
} while (/* CONSTCOND */ 0)
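/*
 * Illustrative sketch, not part of the original driver: BCE_INIT_RXDESC
 * is invoked once per slot whenever a fresh mbuf has been hung off the
 * ring; priming every slot would look like this (bce_init() achieves the
 * same via bce_add_rxbuf()).  The helper name is hypothetical.
 */
#if 0
static void
bce_example_prime_rxring(struct bce_softc *sc)
{
	int i;

	for (i = 0; i < BCE_NRXDESC; i++)
		BCE_INIT_RXDESC(sc, i);	/* point slot i at its mbuf and sync */
}
#endif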
static	int	bce_probe(device_t, cfdata_t, void *);
static	void	bce_attach(device_t, device_t, void *);
static	int	bce_ioctl(struct ifnet *, u_long, void *);
static	void	bce_start(struct ifnet *);
static	void	bce_watchdog(struct ifnet *);
static	int	bce_intr(void *);
static	void	bce_rxintr(struct bce_softc *);
static	void	bce_txintr(struct bce_softc *);
static	int	bce_init(struct ifnet *);
static	void	bce_add_mac(struct bce_softc *, uint8_t *, unsigned long);
static	int	bce_add_rxbuf(struct bce_softc *, int);
static	void	bce_rxdrain(struct bce_softc *);
static	void	bce_stop(struct ifnet *, int);
static	void	bce_reset(struct bce_softc *);
static	bool	bce_resume(device_t, pmf_qual_t);
static	void	bce_set_filter(struct ifnet *);
static	int	bce_mii_read(device_t, int, int);
static	void	bce_mii_write(device_t, int, int, int);
static	void	bce_statchg(device_t);
static	void	bce_tick(void *);

CFATTACH_DECL(bce, sizeof(struct bce_softc), bce_probe, bce_attach, NULL, NULL);
static const struct bce_product {
	pci_vendor_id_t bp_vendor;
	pci_product_id_t bp_product;
	const char	*bp_name;
} bce_products[] = {
	{
		PCI_VENDOR_BROADCOM,
		PCI_PRODUCT_BROADCOM_BCM4401,
		"Broadcom BCM4401 10/100 Ethernet"
	},
	{
		PCI_VENDOR_BROADCOM,
		PCI_PRODUCT_BROADCOM_BCM4401_B0,
		"Broadcom BCM4401-B0 10/100 Ethernet"
	},
	{
		0,
		0,
		NULL
	},
};

static const struct bce_product *
bce_lookup(const struct pci_attach_args * pa)
{
	const struct bce_product *bp;

	for (bp = bce_products; bp->bp_name != NULL; bp++) {
		if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == bp->bp_product)
			return (bp);
	}

	return (NULL);
}
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against the driver's product list, and return its name if a match
 * is found.
 */
static int
bce_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;

	if (bce_lookup(pa) != NULL)
		/* match found */
		return (1);

	return (0);
}
static void
bce_attach(device_t parent, device_t self, void *aux)
{
	struct bce_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct bce_product *bp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	pcireg_t command;
	pcireg_t memtype, pmode;
	bus_addr_t memaddr;
	bus_size_t memsize;
	void *kva;
	bus_dma_segment_t seg;
	int error, i, pmreg, rseg;
	struct ifnet *ifp;

	bp = bce_lookup(pa);
	KASSERT(bp != NULL);
	/* BCM440x can only address 30 bits (1GB) */
	if (bus_dmatag_subregion(pa->pa_dmat, 0, (1 << 30),
	    &(sc->bce_dmatag), BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(self,
		    "WARNING: failed to restrict dma range,"
		    " falling back to parent bus dma range\n");
		sc->bce_dmatag = pa->pa_dmat;
	}
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s\n", bp->bp_name);
	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(self, "failed to enable memory mapping!\n");
		return;
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
		    &sc->bce_bhandle, &memaddr, &memsize) == 0)
			break;
	default:
		aprint_error_dev(self, "unable to find mem space\n");
		return;
	}
	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, NULL)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error_dev(self,
			    "unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			aprint_normal_dev(self,
			    "waking up from power state D%d\n", pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);

	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc);

	if (sc->bce_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}

	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive and transmit rings can not share the same
	 * 4k space, however both are allocated at once here.
	 *
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
	 * cannot use less due to the limitation above.
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to alloc space for ring descriptors, error = %d\n",
		    error);
		return;
	}
	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to map DMA buffers, error = %d\n", error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->bce_ring_map))) {
		aprint_error_dev(self,
		    "unable to create ring DMA map, error = %d\n", error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) ((char *)kva + PAGE_SIZE);
	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->bce_cdata.bce_tx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}
	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}
	/* Set up ifnet structure */
	ifp = &sc->ethercom.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	ifp->if_stop = bce_stop;
	IFQ_SET_READY(&ifp->if_snd);
	/* Initialize our media structures and probe the MII. */

	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;

	sc->ethercom.ec_mii = &sc->bce_mii;
	ifmedia_init(&sc->bce_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_FORCEANEG|MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE,
		    0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);
	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_PHY) & 0x1f;
	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write(&sc->bce_dev, 1, 26,	/* MAGIC */
	    bce_mii_read(&sc->bce_dev, 1, 26) & 0x7fff);	/* MAGIC */
	/* enable traffic meter led mode */
	bce_mii_write(&sc->bce_dev, 1, 27,	/* MAGIC */
	    bce_mii_read(&sc->bce_dev, 1, 27) | (1 << 6));	/* MAGIC */
	/* Attach the interface */
	if_attach(ifp);
	sc->enaddr[0] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET0);
	sc->enaddr[1] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET1);
	sc->enaddr[2] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET2);
	sc->enaddr[3] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET3);
	sc->enaddr[4] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET4);
	sc->enaddr[5] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET5);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->enaddr));
	ether_ifattach(ifp, sc->enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
#endif
	callout_init(&sc->bce_timeout, 0);

	if (pmf_device_register(self, NULL, bce_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}
/* handle media, and ethernet requests */
static int
bce_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/* change multicast list */
		error = 0;
	}

	/* Try to get more packets going. */
	bce_start(ifp);

	splx(s);
	return error;
}
/* Start packet transmission on the interface. */
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	bus_dmamap_t dmamap;
	int txstart;
	int txsfree;
	int newpkts = 0;
	int error;

	/*
	 * do not start another if currently transmitting, and more
	 * descriptors (tx slots) are needed for the next packet.
	 */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* determine number of descriptors available */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;
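	/*
	 * Worked example (added note, not in the original): the ring is
	 * circular and one slot is always kept unused so that
	 * bce_txsnext == bce_txin unambiguously means "empty".  With
	 * hypothetical values bce_txin = 10 and bce_txsnext = 120, the
	 * first branch gives 128 - 1 + 10 - 120 = 17 free slots.
	 */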
	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (txsfree > 0) {
		int seg;

		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		/* get the transmit slot dma map */
		dmamap = sc->bce_cdata.bce_tx_map[sc->bce_txsnext];
		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources. If the packet will not fit,
		 * it will be dropped. If short on resources, it will
		 * be tried again later.
		 */
		error = bus_dmamap_load_mbuf(sc->bce_dmatag, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			aprint_error_dev(&sc->bce_dev,
			    "Tx packet consumes too many DMA segments, "
			    "dropping...\n");
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			m_freem(m0);
			ifp->if_oerrors++;
			continue;
		} else if (error) {
			/* short on resources, come back later */
			aprint_error_dev(&sc->bce_dev,
			    "unable to load Tx buffer, error = %d\n",
			    error);
			break;
		}
		/* If not enough descriptors available, try again later */
		if (dmamap->dm_nsegs > txsfree) {
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->bce_dmatag, dmamap);
			break;
		}
		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */

		/* So take it off the queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/* save the pointer so it can be freed later */
		sc->bce_cdata.bce_tx_chain[sc->bce_txsnext] = m0;

		/* Sync the data DMA map. */
		bus_dmamap_sync(sc->bce_dmatag, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			uint32_t ctrl;

			ctrl = dmamap->dm_segs[seg].ds_len & CTRL_BC_MASK;
			if (seg == 0)
				ctrl |= CTRL_SOF;
			if (seg == dmamap->dm_nsegs - 1)
				ctrl |= CTRL_EOF;
			if (sc->bce_txsnext == BCE_NTXDESC - 1)
				ctrl |= CTRL_EOT;
			ctrl |= CTRL_IOC;
			sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
			sc->bce_tx_ring[sc->bce_txsnext].addr =
			    htole32(dmamap->dm_segs[seg].ds_addr
			    + 0x40000000);	/* MAGIC */
			if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
				sc->bce_txsnext = 0;
			else
				sc->bce_txsnext++;
			txsfree--;
		}
		/* sync descriptors being used */
		if (sc->bce_txsnext > txstart) {
			bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
			    PAGE_SIZE + sizeof(struct bce_dma_slot) * txstart,
			    sizeof(struct bce_dma_slot) * dmamap->dm_nsegs,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
			    PAGE_SIZE + sizeof(struct bce_dma_slot) * txstart,
			    sizeof(struct bce_dma_slot) *
			    (BCE_NTXDESC - txstart),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			if (sc->bce_txsnext != 0) {
				bus_dmamap_sync(sc->bce_dmatag,
				    sc->bce_ring_map, PAGE_SIZE,
				    sc->bce_txsnext *
				    sizeof(struct bce_dma_slot),
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			}
		}
		/* Give the packet to the chip. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
		    sc->bce_txsnext * sizeof(struct bce_dma_slot));

		newpkts++;

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif	/* NBPFILTER > 0 */
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
/* Watchdog timer handler. */
static void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	aprint_error_dev(&sc->bce_dev, "device timeout\n");
	ifp->if_oerrors++;

	(void) bce_init(ifp);

	/* Try to get more packets going. */
	bce_start(ifp);
}
/* interrupt handler */
static int
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	uint32_t intstatus;
	int wantinit;
	int handled = 0;

	sc = xsc;
	ifp = &sc->ethercom.ec_if;

	for (wantinit = 0; wantinit == 0;) {
		intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_INT_STS);

		/* ignore if not ours, or unsolicited interrupts */
		intstatus &= sc->bce_intmask;
		if (intstatus == 0)
			break;

		handled = 1;

		/* Ack interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
		    intstatus);
		/* Receive interrupts. */
		if (intstatus & I_RI)
			bce_rxintr(sc);
		/* Transmit interrupts. */
		if (intstatus & I_XI)
			bce_txintr(sc);
		/* Error interrupts */
		if (intstatus & ~(I_RI | I_XI)) {
			const char *msg = NULL;
			if (intstatus & I_XU)
				msg = "transmit fifo underflow";
			if (intstatus & I_RO) {
				msg = "receive fifo overflow";
				ifp->if_ierrors++;
			}
			if (intstatus & I_RU)
				msg = "receive descriptor underflow";
			if (intstatus & I_DE)
				msg = "descriptor protocol error";
			if (intstatus & I_PD)
				msg = "data error";
			if (intstatus & I_PC)
				msg = "descriptor error";
			if (intstatus & I_TO)
				msg = "general purpose timeout";
			if (msg != NULL)
				aprint_error_dev(&sc->bce_dev, "%s\n", msg);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			bce_init(ifp);
#if NRND > 0
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, intstatus);
#endif
		/* Try to get more packets going. */
		bce_start(ifp);
	}
	return (handled);
}
/* Receive interrupt handler */
static void
bce_rxintr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct rx_pph *pph;
	struct mbuf *m;
	int curr;
	int len;
	int i;

	/* get pointer to active receive slot */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)
	    & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NRXDESC)
		curr = BCE_NRXDESC - 1;
	/* process packets up to but not current packet being worked on */
	for (i = sc->bce_rxin; i != curr;
	    i + 1 > BCE_NRXDESC - 1 ? i = 0 : i++) {
		/* complete any post dma memory ops on packet */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[i], 0,
		    sc->bce_cdata.bce_rx_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		/*
		 * If the packet had an error, simply recycle the buffer,
		 * resetting the len, and flags.
		 */
		pph = mtod(sc->bce_cdata.bce_rx_chain[i], struct rx_pph *);
		if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
			ifp->if_ierrors++;
			pph->len = 0;
			pph->flags = 0;
			BCE_INIT_RXDESC(sc, i);
			continue;
		}
		/* receive the packet */
		len = pph->len;
		if (len == 0)
			continue;	/* no packet if empty */
		pph->len = 0;
		pph->flags = 0;
		/* bump past pre header to packet */
		sc->bce_cdata.bce_rx_chain[i]->m_data += 30;	/* MAGIC */

		/*
		 * The chip includes the CRC with every packet. Trim
		 * it off here.
		 */
		len -= ETHER_CRC_LEN;
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it. This greatly reduces
		 * memory consumption when receiving lots
		 * of small packets.
		 *
		 * Otherwise, add a new buffer to the receive
		 * chain. If this fails, drop the packet and
		 * recycle the old buffer.
		 */
		if (len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			memcpy(mtod(m, void *),
			    mtod(sc->bce_cdata.bce_rx_chain[i], void *), len);
			sc->bce_cdata.bce_rx_chain[i]->m_data -= 30;	/* MAGIC */
		} else {
			m = sc->bce_cdata.bce_rx_chain[i];
			if (bce_add_rxbuf(sc, i) != 0) {
		dropit:
				ifp->if_ierrors++;
				/* continue to use old buffer */
				sc->bce_cdata.bce_rx_chain[i]->m_data -= 30;
				bus_dmamap_sync(sc->bce_dmatag,
				    sc->bce_cdata.bce_rx_map[i], 0,
				    sc->bce_cdata.bce_rx_map[i]->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif	/* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input) (ifp, m);
		/* re-check current in case it changed */
		curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS) & RS_CD_MASK) /
		    sizeof(struct bce_dma_slot);
		if (curr >= BCE_NRXDESC)
			curr = BCE_NRXDESC - 1;
	}
	sc->bce_rxin = curr;
}
/* Transmit interrupt handler */
static void
bce_txintr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int curr;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_DMA_TXSTATUS) & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NTXDESC)
		curr = BCE_NTXDESC - 1;
	for (i = sc->bce_txin; i != curr;
	    i + 1 > BCE_NTXDESC - 1 ? i = 0 : i++) {
		/* do any post dma memory ops on transmit data */
		if (sc->bce_cdata.bce_tx_chain[i] == NULL)
			continue;
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[i], 0,
		    sc->bce_cdata.bce_tx_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[i]);
		m_freem(sc->bce_cdata.bce_tx_chain[i]);
		sc->bce_cdata.bce_tx_chain[i] = NULL;
		ifp->if_opackets++;
	}
	sc->bce_txin = curr;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer
	 */
	if (sc->bce_txsnext == sc->bce_txin)
		ifp->if_timer = 0;
}
/* initialize the interface */
static int
bce_init(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	uint32_t reg_win;
	int error;
	int i;

	/* Cancel any pending I/O. */
	bce_stop(ifp, 0);

	/* enable pci interrupts, bursts, and prefetch */

	/* remap the pci registers to the Sonics config registers */

	/* save the current map, so it can be restored */
	reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
	    BCE_REG_WIN);

	/* set register window to Sonics registers */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    BCE_SONICS_WIN);
	/* enable SB to PCI interrupt */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
	    SBIV_ENET0);

	/* enable prefetch and bursts for sonics-to-pci translation 2 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
	    SBTOPCI_PREF | SBTOPCI_BURST);
	/* restore to ethernet register space */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    reg_win);
	/* Reset the chip to a known state. */
	bce_reset(sc);

	/* Initialize transmit descriptors */
	memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
	sc->bce_txsnext = 0;
	sc->bce_txin = 0;
	/* enable crc32 generation */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
	    BCE_EMC_CRC32_ENAB);
	/* setup DMA interrupt control */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
	    1 << 24);	/* MAGIC */

	/* setup packet filter */
	bce_set_filter(ifp);
	/* set max frame length, account for possible vlan tag */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
	    ETHER_MAX_LEN + 32);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
	    ETHER_MAX_LEN + 32);
	/* set tx watermark */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);
	/* enable transmit */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE
	    + 0x40000000);	/* MAGIC */
	/*
	 * Give the receive ring to the chip, and
	 * start the receive DMA engine.
	 */
	sc->bce_rxin = 0;

	/* clear the rx descriptor ring */
	memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
	/* enable receive */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
	    30 << 1 | 1);	/* MAGIC */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000);	/* MAGIC */
	/* Initialize receive descriptors */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if (sc->bce_cdata.bce_rx_chain[i] == NULL) {
			if ((error = bce_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(&sc->bce_dev,
				    "unable to allocate or map rx(%d) "
				    "mbuf, error = %d\n", i, error);
				bce_rxdrain(sc);
				return (error);
			}
		} else
			BCE_INIT_RXDESC(sc, i);
	}
	/* Enable interrupts */
	sc->bce_intmask =
	    I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
	    sc->bce_intmask);
	/* start the receive dma */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
	    BCE_NRXDESC * sizeof(struct bce_dma_slot));
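	/*
	 * Added note, not in the original: the descriptor-pointer register
	 * holds a byte offset into the ring, so writing
	 * BCE_NRXDESC * sizeof(struct bce_dma_slot) (128 * 8 = 1024) hands
	 * every slot up to the end of the ring to the chip.
	 */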
	/* set media */
	if ((error = ether_mediachange(ifp)) != 0)
		return error;
	/* turn on the ethernet mac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_ENET_CTL) | EC_EE);

	/* start timer */
	callout_reset(&sc->bce_timeout, hz, bce_tick, sc);

	/* mark as running, and no outputs active */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}
/* add a mac address to packet filter */
static void
bce_add_mac(struct bce_softc *sc, uint8_t *mac, u_long idx)
{
	int i;
	uint32_t rval;

	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
	    mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
	    mac[0] << 8 | mac[1] | 0x10000);	/* MAGIC */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
	    idx << 16 | 8);	/* MAGIC */
	/* wait for write to complete */
	for (i = 0; i < 100; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL);
		if (!(rval & 0x80000000))	/* MAGIC */
			break;
		delay(10);
	}
	if (i == 100) {
		aprint_error_dev(&sc->bce_dev,
		    "timed out writing pkt filter ctl\n");
	}
}
/* Add a receive buffer to the indicated descriptor. */
static int
bce_add_rxbuf(struct bce_softc *sc, int idx)
{
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	if (sc->bce_cdata.bce_rx_chain[idx] != NULL)
		bus_dmamap_unload(sc->bce_dmatag,
		    sc->bce_cdata.bce_rx_map[idx]);

	sc->bce_cdata.bce_rx_chain[idx] = m;

	error = bus_dmamap_load(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error)
		return (error);

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx], 0,
	    sc->bce_cdata.bce_rx_map[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	BCE_INIT_RXDESC(sc, idx);

	return (0);
}
/* Drain the receive queue. */
static void
bce_rxdrain(struct bce_softc *sc)
{
	int i;

	for (i = 0; i < BCE_NRXDESC; i++) {
		if (sc->bce_cdata.bce_rx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bce_dmatag,
			    sc->bce_cdata.bce_rx_map[i]);
			m_freem(sc->bce_cdata.bce_rx_chain[i]);
			sc->bce_cdata.bce_rx_chain[i] = NULL;
		}
	}
}
/* Stop transmission on the interface */
static void
bce_stop(struct ifnet *ifp, int disable)
{
	struct bce_softc *sc = ifp->if_softc;
	uint32_t val;
	int i;

	/* Stop the 1 second timer */
	callout_stop(&sc->bce_timeout);

	/* Down the MII. */
	mii_down(&sc->bce_mii);

	/* Disable interrupts. */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
	sc->bce_intmask = 0;
	delay(10);

	/* Disable emac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
	for (i = 0; i < 200; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_ENET_CTL);
		if (!(val & EC_ED))
			break;
		delay(10);
	}

	/* Stop the DMA */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
	delay(10);

	/* Release any queued transmit buffers. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if (sc->bce_cdata.bce_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bce_dmatag,
			    sc->bce_cdata.bce_tx_map[i]);
			m_freem(sc->bce_cdata.bce_tx_chain[i]);
			sc->bce_cdata.bce_tx_chain[i] = NULL;
		}
	}

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* drain receive queue */
	if (disable)
		bce_rxdrain(sc);
}
/* reset the chip */
static void
bce_reset(struct bce_softc *sc)
{
	uint32_t val;
	uint32_t sbval;
	int i;

	/* if SB core is up */
	sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
		    0);

		/* disable emac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ED);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ED))
				break;
			delay(10);
		}
		if (i == 200)
			aprint_error_dev(&sc->bce_dev,
			    "timed out disabling ethernet mac\n");

		/* reset the dma engines */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,
		    0);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS);
		/* if error on receive, wait to go idle */
		if (val & RS_ERROR) {
			for (i = 0; i < 100; i++) {
				val = bus_space_read_4(sc->bce_btag,
				    sc->bce_bhandle, BCE_DMA_RXSTATUS);
				if (val & RS_DMA_IDLE)
					break;
				delay(10);
			}
			if (i == 100)
				aprint_error_dev(&sc->bce_dev,
				    "receive dma did not go idle after"
				    " error\n");
		}
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS, 0);

		/* reset ethernet mac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ES);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ES))
				break;
			delay(10);
		}
		if (i == 200)
			aprint_error_dev(&sc->bce_dev,
			    "timed out resetting ethernet mac\n");
	} else {
		uint32_t reg_win;

		/* remap the pci registers to the Sonics config registers */

		/* save the current map, so it can be restored */
		reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN);
		/* set register window to Sonics registers */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, BCE_SONICS_WIN);

		/* enable SB to PCI interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBINTVEC) | SBIV_ENET0);

		/* enable prefetch and bursts for sonics-to-pci translation 2 */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SPCI_TR2) |
		    SBTOPCI_PREF | SBTOPCI_BURST);

		/* restore to ethernet register space */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
		    reg_win);
	}
	/* disable SB core if not in reset */
	if (!(sbval & SBTML_RESET)) {

		/* set the reject bit */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATELOW);
			if (val & SBTML_REJ)
				break;
			delay(1);
		}
		if (i == 200)
			aprint_error_dev(&sc->bce_dev,
			    "while resetting core, reject did not set\n");

		/* wait until busy is clear */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATEHI);
			if (!(val & SBTMH_BUSY))
				break;
			delay(1);
		}
		if (i == 200)
			aprint_error_dev(&sc->bce_dev,
			    "while resetting core, busy did not clear\n");

		/* set reset and reject while enabling the clocks */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW,
		    SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW);
		delay(10);

		/* leave reset and reject asserted */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
		delay(1);
	}

	/* enable clock while the core is held in reset */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK | SBTML_RESET);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);
	/* clear any error bits that may be on */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
	if (val & SBTMH_SERR)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATEHI, 0);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
	if (val & SBIM_MAGIC_ERRORBITS)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
		    val & ~SBIM_MAGIC_ERRORBITS);

	/* clear reset and allow it to propagate throughout the core */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* leave clock enabled */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);
	/* initialize MDC preamble, frequency */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL,
	    0x8d);	/* MAGIC */

	/* enable phy, differs for internal, and external */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
	if (!(val & BCE_DC_IP)) {
		/* select external phy */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_EP);
	} else if (val & BCE_DC_ER) {	/* internal, clear reset bit if on */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
		    val & ~BCE_DC_ER);
		delay(100);
	}
}
/* Set up the receive filter. */
static void
bce_set_filter(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL)
		    | ERC_PE);
	} else {
		ifp->if_flags &= ~IFF_ALLMULTI;

		/* turn off promiscuous */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_RX_CTL) & ~ERC_PE);

		/* enable/disable broadcast */
		if (ifp->if_flags & IFF_BROADCAST)
			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
			    sc->bce_bhandle, BCE_RX_CTL) & ~ERC_DB);
		else
			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
			    sc->bce_bhandle, BCE_RX_CTL) | ERC_DB);

		/* disable the filter */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
		    0);

		/* add our own address */
		bce_add_mac(sc, sc->enaddr, 0);

		/* for now accept all multicast */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_RX_CTL) | ERC_AM);
		ifp->if_flags |= IFF_ALLMULTI;

		/* enable the filter */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL) | 1);
	}
}
static bool
bce_resume(device_t self, pmf_qual_t qual)
{
	struct bce_softc *sc = device_private(self);

	bce_reset(sc);

	return true;
}
/* Read a PHY register on the MII. */
static int
bce_mii_read(device_t self, int phy, int reg)
{
	struct bce_softc *sc = device_private(self);
	uint32_t val;
	int i;

	/* clear mii_int */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Read the PHY register */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
	    (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg));	/* MAGIC */

	for (i = 0; i < BCE_TIMEOUT; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (val & BCE_MIINTR)
			break;
		delay(10);
	}
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		aprint_error_dev(&sc->bce_dev,
		    "PHY read timed out reading phy %d, reg %d, val = "
		    "0x%08x\n", phy, reg, val);
		return (0);
	}
	return (val & BCE_MICOMM_DATA);
}
/* Write a PHY register on the MII */
static void
bce_mii_write(device_t self, int phy, int reg, int val)
{
	struct bce_softc *sc = device_private(self);
	uint32_t rval;
	int i;

	/* clear mii_int */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Write the PHY register */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |	/* MAGIC */
	    BCE_MIPHY(phy) | BCE_MIREG(reg));

	/* wait for write to complete */
	for (i = 0; i < BCE_TIMEOUT; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (rval & BCE_MIINTR)
			break;
		delay(10);
	}
	rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		aprint_error_dev(&sc->bce_dev,
		    "PHY timed out writing phy %d, reg %d, val = 0x%08x\n",
		    phy, reg, val);
	}
}
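/*
 * Illustrative sketch, not part of the original driver: the word written
 * to BCE_MI_COMM in the two routines above packs a standard MII
 * management frame: start bits in 31:30, opcode in 29:28, turnaround in
 * 17:16, plus the PHY and register numbers and (for writes) the 16-bit
 * data.  The helper name is hypothetical.
 */
#if 0
static uint32_t
bce_example_mii_writecmd(int phy, int reg, int val)
{
	return (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |
	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |
	    BCE_MIPHY(phy) | BCE_MIREG(reg);
}
#endif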
/* sync hardware duplex mode to software state */
static void
bce_statchg(device_t self)
{
	struct bce_softc *sc = device_private(self);
	uint32_t reg;

	/* if needed, change register to match duplex mode */
	reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
	if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
		    reg | EXC_FD);
	else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
		    reg & ~EXC_FD);

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write(&sc->bce_dev, 1, 26,	/* MAGIC */
	    bce_mii_read(&sc->bce_dev, 1, 26) & 0x7fff);	/* MAGIC */
	/* enable traffic meter led mode */
	bce_mii_write(&sc->bce_dev, 1, 27,	/* MAGIC */
	    bce_mii_read(&sc->bce_dev, 1, 27) | (1 << 6));	/* MAGIC */
}
/* One second timer, checks link status */
static void
bce_tick(void *v)
{
	struct bce_softc *sc = v;

	/* Tick the MII. */
	mii_tick(&sc->bce_mii);

	callout_reset(&sc->bce_timeout, hz, bce_tick, sc);
}