4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
25 #include <sys/stream.h>
26 #include <sys/strsun.h>
29 #include <sys/modctl.h>
30 #include <sys/kstat.h>
31 #include <sys/ethernet.h>
32 #include <sys/devops.h>
33 #include <sys/debug.h>
35 #include <sys/sysmacros.h>
36 #include <sys/dditypes.h>
38 #include <sys/sunddi.h>
39 #include <sys/miiregs.h>
40 #include <sys/byteorder.h>
41 #include <sys/cyclic.h>
43 #include <sys/crc32.h>
44 #include <sys/mac_provider.h>
45 #include <sys/mac_ether.h>
47 #include <sys/errno.h>
49 #include <sys/strsubr.h>
56 * Broadcom BCM4401 chipsets use two rings :
58 * - One TX : For sending packets down the wire.
59 * - One RX : For receiving packets.
61 * Each ring can have any number of descriptors (configured during attach).
62 * As of now we configure only 128 descriptor per ring (TX/RX). Each descriptor
63 * has address (desc_addr) and control (desc_ctl) which holds a DMA buffer for
64 * the packet and control information (like start/end of frame or end of table).
65 * The descriptor table is allocated first and then a DMA buffer (for a packet)
66 * is allocated and linked to each descriptor.
68 * Each descriptor entry is bfe_desc_t structure in bfe. During TX/RX
69 * interrupt, the stat register will point to current descriptor being
72 * Here's an example of TX and RX ring :
76 * Base of the descriptor table is programmed using BFE_DMATX_CTRL control
77 * register. Each 'addr' points to DMA buffer (or packet data buffer) to
78 * be transmitted and 'ctl' has the length of the packet (usually MTU).
80 * ----------------------|
81 * | addr |Descriptor 0 |
83 * ----------------------|
84 * | addr |Descriptor 1 | SOF (start of the frame)
86 * ----------------------|
87 * | ... |Descriptor... | EOF (end of the frame)
89 * ----------------------|
90 * | addr |Descriptor 127|
91 * | ctl | EOT | EOT (End of Table)
92 * ----------------------|
94 * 'r_curr_desc' : pointer to current descriptor which can be used to transmit
96 * 'r_avail_desc' : decremented whenever a packet is being sent.
97 * 'r_cons_desc' : incremented whenever a packet is sent down the wire and
98 * notified by an interrupt to bfe driver.
102 * Base of the descriptor table is programmed using BFE_DMARX_CTRL control
103 * register. Each 'addr' points to DMA buffer (or packet data buffer). 'ctl'
104 * contains the size of the DMA buffer and all the DMA buffers are
105 * pre-allocated during attach and hence the maximum size of the packet is
106 * also known (r_buf_len from the bfe_rint_t structure). During RX interrupt
107 * the packet length is embedded in bfe_header_t which is added by the
108 * chip in the beginning of the packet.
110 * ----------------------|
111 * | addr |Descriptor 0 |
113 * ----------------------|
114 * | addr |Descriptor 1 |
116 * ----------------------|
117 * | ... |Descriptor... |
119 * ----------------------|
120 * | addr |Descriptor 127|
121 * | ctl | EOT | EOT (End of Table)
122 * ----------------------|
124 * 'r_curr_desc' : pointer to current descriptor while receiving a packet.
128 #define MODULE_NAME "bfe"
131 * Used for checking PHY (link state, speed)
133 #define BFE_TIMEOUT_INTERVAL (1000 * 1000 * 1000)
137 * Chip restart action and reason for restart
139 #define BFE_ACTION_RESTART 0x1 /* For restarting the chip */
140 #define BFE_ACTION_RESTART_SETPROP 0x2 /* restart due to setprop */
141 #define BFE_ACTION_RESTART_FAULT 0x4 /* restart due to fault */
142 #define BFE_ACTION_RESTART_PKT 0x8 /* restart due to pkt timeout */
144 static char bfe_ident
[] = "bfe driver for Broadcom BCM4401 chipsets";
147 * Function Prototypes for bfe driver.
149 static int bfe_check_link(bfe_t
*);
150 static void bfe_report_link(bfe_t
*);
151 static void bfe_chip_halt(bfe_t
*);
152 static void bfe_chip_reset(bfe_t
*);
153 static void bfe_tx_desc_init(bfe_ring_t
*);
154 static void bfe_rx_desc_init(bfe_ring_t
*);
155 static void bfe_set_rx_mode(bfe_t
*);
156 static void bfe_enable_chip_intrs(bfe_t
*);
157 static void bfe_chip_restart(bfe_t
*);
158 static void bfe_init_vars(bfe_t
*);
159 static void bfe_clear_stats(bfe_t
*);
160 static void bfe_gather_stats(bfe_t
*);
161 static void bfe_error(dev_info_t
*, char *, ...);
162 static int bfe_mac_getprop(void *, const char *, mac_prop_id_t
, uint_t
,
164 static int bfe_mac_setprop(void *, const char *, mac_prop_id_t
, uint_t
,
166 static int bfe_tx_reclaim(bfe_ring_t
*);
167 int bfe_mac_set_ether_addr(void *, const uint8_t *);
171 * Macros for ddi_dma_sync().
173 #define SYNC_DESC(r, s, l, d) \
174 (void) ddi_dma_sync(r->r_desc_dma_handle, \
175 (off_t)(s * sizeof (bfe_desc_t)), \
176 (size_t)(l * sizeof (bfe_desc_t)), \
179 #define SYNC_BUF(r, s, b, l, d) \
180 (void) ddi_dma_sync(r->r_buf_dma[s].handle, \
181 (off_t)(b), (size_t)(l), d)
184 * Supported Broadcom BCM4401 Cards.
186 static bfe_cards_t bfe_cards
[] = {
187 { 0x14e4, 0x170c, "BCM4401 100Base-TX"},
192 * DMA attributes for device registers, packet data (buffer) and
195 static struct ddi_device_acc_attr bfe_dev_attr
= {
197 DDI_STRUCTURE_LE_ACC
,
201 static struct ddi_device_acc_attr bfe_buf_attr
= {
203 DDI_NEVERSWAP_ACC
, /* native endianness */
207 static ddi_dma_attr_t bfe_dma_attr_buf
= {
208 DMA_ATTR_V0
, /* dma_attr_version */
209 0, /* dma_attr_addr_lo */
210 BFE_PCI_DMA
- 1, /* dma_attr_addr_hi */
211 0x1fff, /* dma_attr_count_max */
212 8, /* dma_attr_align */
213 0, /* dma_attr_burstsizes */
214 1, /* dma_attr_minxfer */
215 0x1fff, /* dma_attr_maxxfer */
216 BFE_PCI_DMA
- 1, /* dma_attr_seg */
217 1, /* dma_attr_sgllen */
218 1, /* dma_attr_granular */
219 0 /* dma_attr_flags */
222 static ddi_dma_attr_t bfe_dma_attr_desc
= {
223 DMA_ATTR_V0
, /* dma_attr_version */
224 0, /* dma_attr_addr_lo */
225 BFE_PCI_DMA
- 1, /* dma_attr_addr_hi */
226 BFE_PCI_DMA
- 1, /* dma_attr_count_max */
227 BFE_DESC_ALIGN
, /* dma_attr_align */
228 0, /* dma_attr_burstsizes */
229 1, /* dma_attr_minxfer */
230 BFE_PCI_DMA
- 1, /* dma_attr_maxxfer */
231 BFE_PCI_DMA
- 1, /* dma_attr_seg */
232 1, /* dma_attr_sgllen */
233 1, /* dma_attr_granular */
234 0 /* dma_attr_flags */
238 * Ethernet broadcast addresses.
240 static uchar_t bfe_broadcast
[ETHERADDRL
] = {
241 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
244 #define ASSERT_ALL_LOCKS(bfe) { \
245 ASSERT(mutex_owned(&bfe->bfe_tx_ring.r_lock)); \
246 ASSERT(rw_write_held(&bfe->bfe_rwlock)); \
250 * Debugging and error reporting code.
253 bfe_error(dev_info_t
*dip
, char *fmt
, ...)
259 (void) vsnprintf(buf
, sizeof (buf
), fmt
, ap
);
263 cmn_err(CE_WARN
, "%s%d: %s",
264 ddi_driver_name(dip
), ddi_get_instance(dip
), buf
);
266 cmn_err(CE_WARN
, "bfe: %s", buf
);
271 * Grabs all necessary locks to block any other operation on the chip.
274 bfe_grab_locks(bfe_t
*bfe
)
276 bfe_ring_t
*tx
= &bfe
->bfe_tx_ring
;
279 * Grab all the locks.
280 * - bfe_rwlock : locks down whole chip including RX.
281 * - tx's r_lock : locks down only TX side.
283 rw_enter(&bfe
->bfe_rwlock
, RW_WRITER
);
284 mutex_enter(&tx
->r_lock
);
287 * Note that we don't use RX's r_lock.
292 * Release lock on chip/drver.
295 bfe_release_locks(bfe_t
*bfe
)
297 bfe_ring_t
*tx
= &bfe
->bfe_tx_ring
;
300 * Release all the locks in the order in which they were grabbed.
302 mutex_exit(&tx
->r_lock
);
303 rw_exit(&bfe
->bfe_rwlock
);
308 * It's used to make sure that the write to device register was successful.
311 bfe_wait_bit(bfe_t
*bfe
, uint32_t reg
, uint32_t bit
,
312 ulong_t t
, const int clear
)
317 for (i
= 0; i
< t
; i
++) {
320 if (clear
&& !(v
& bit
))
323 if (!clear
&& (v
& bit
))
329 /* if device still didn't see the value */
337 * PHY functions (read, write, stop, reset and startup)
340 bfe_read_phy(bfe_t
*bfe
, uint32_t reg
)
342 OUTL(bfe
, BFE_EMAC_ISTAT
, BFE_EMAC_INT_MII
);
343 OUTL(bfe
, BFE_MDIO_DATA
, (BFE_MDIO_SB_START
|
344 (BFE_MDIO_OP_READ
<< BFE_MDIO_OP_SHIFT
) |
345 (bfe
->bfe_phy_addr
<< BFE_MDIO_PMD_SHIFT
) |
346 (reg
<< BFE_MDIO_RA_SHIFT
) |
347 (BFE_MDIO_TA_VALID
<< BFE_MDIO_TA_SHIFT
)));
349 (void) bfe_wait_bit(bfe
, BFE_EMAC_ISTAT
, BFE_EMAC_INT_MII
, 10, 0);
351 return ((INL(bfe
, BFE_MDIO_DATA
) & BFE_MDIO_DATA_DATA
));
355 bfe_write_phy(bfe_t
*bfe
, uint32_t reg
, uint32_t val
)
357 OUTL(bfe
, BFE_EMAC_ISTAT
, BFE_EMAC_INT_MII
);
358 OUTL(bfe
, BFE_MDIO_DATA
, (BFE_MDIO_SB_START
|
359 (BFE_MDIO_OP_WRITE
<< BFE_MDIO_OP_SHIFT
) |
360 (bfe
->bfe_phy_addr
<< BFE_MDIO_PMD_SHIFT
) |
361 (reg
<< BFE_MDIO_RA_SHIFT
) |
362 (BFE_MDIO_TA_VALID
<< BFE_MDIO_TA_SHIFT
) |
363 (val
& BFE_MDIO_DATA_DATA
)));
365 (void) bfe_wait_bit(bfe
, BFE_EMAC_ISTAT
, BFE_EMAC_INT_MII
, 10, 0);
369 * It resets the PHY layer.
372 bfe_reset_phy(bfe_t
*bfe
)
376 bfe_write_phy(bfe
, MII_CONTROL
, MII_CONTROL_RESET
);
378 for (i
= 0; i
< 10; i
++) {
379 if (bfe_read_phy(bfe
, MII_CONTROL
) &
389 bfe_error(bfe
->bfe_dip
, "Timeout waiting for PHY to reset");
390 bfe
->bfe_phy_state
= BFE_PHY_RESET_TIMEOUT
;
391 return (BFE_FAILURE
);
394 bfe
->bfe_phy_state
= BFE_PHY_RESET_DONE
;
396 return (BFE_SUCCESS
);
400 * Make sure timer function is out of our way and especially during
404 bfe_stop_timer(bfe_t
*bfe
)
406 if (bfe
->bfe_periodic_id
) {
407 ddi_periodic_delete(bfe
->bfe_periodic_id
);
408 bfe
->bfe_periodic_id
= NULL
;
416 bfe_stop_phy(bfe_t
*bfe
)
418 bfe_write_phy(bfe
, MII_CONTROL
, MII_CONTROL_PWRDN
|
419 MII_CONTROL_ISOLATE
);
421 bfe
->bfe_chip
.link
= LINK_STATE_UNKNOWN
;
422 bfe
->bfe_chip
.speed
= 0;
423 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_UNKNOWN
;
425 bfe
->bfe_phy_state
= BFE_PHY_STOPPED
;
428 * Report the link status to MAC layer.
430 if (bfe
->bfe_machdl
!= NULL
)
431 (void) bfe_report_link(bfe
);
435 bfe_probe_phy(bfe_t
*bfe
)
440 if (bfe
->bfe_phy_addr
) {
441 status
= bfe_read_phy(bfe
, MII_STATUS
);
442 if (status
!= 0xffff && status
!= 0) {
443 bfe_write_phy(bfe
, MII_CONTROL
, 0);
444 return (BFE_SUCCESS
);
448 for (phy
= 0; phy
< 32; phy
++) {
449 bfe
->bfe_phy_addr
= phy
;
450 status
= bfe_read_phy(bfe
, MII_STATUS
);
451 if (status
!= 0xffff && status
!= 0) {
452 bfe_write_phy(bfe
, MII_CONTROL
, 0);
453 return (BFE_SUCCESS
);
457 return (BFE_FAILURE
);
461 * This timeout function fires at BFE_TIMEOUT_INTERVAL to check the link
465 bfe_timeout(void *arg
)
467 bfe_t
*bfe
= (bfe_t
*)arg
;
471 * We don't grab any lock because bfe can't go away.
472 * untimeout() will wait for this timeout instance to complete.
474 if (bfe
->bfe_chip_action
& BFE_ACTION_RESTART
) {
479 bfe_chip_restart(bfe
);
480 bfe
->bfe_chip_action
&= ~BFE_ACTION_RESTART
;
481 bfe
->bfe_chip_action
&= ~BFE_ACTION_RESTART_FAULT
;
482 bfe
->bfe_chip_action
&= ~BFE_ACTION_RESTART_PKT
;
483 bfe_release_locks(bfe
);
484 mac_tx_update(bfe
->bfe_machdl
);
485 /* Restart will register a new timeout */
489 rw_enter(&bfe
->bfe_rwlock
, RW_READER
);
491 if (bfe
->bfe_chip_state
== BFE_CHIP_ACTIVE
) {
495 if (bfe
->bfe_tx_stall_time
!= 0 &&
496 hr
> bfe
->bfe_tx_stall_time
) {
497 DTRACE_PROBE2(chip__restart
, int, bfe
->bfe_unit
,
498 char *, "pkt timeout");
499 bfe
->bfe_chip_action
|=
500 (BFE_ACTION_RESTART
| BFE_ACTION_RESTART_PKT
);
501 bfe
->bfe_tx_stall_time
= 0;
505 if (bfe
->bfe_phy_state
== BFE_PHY_STARTED
) {
507 * Report the link status to MAC layer if link status changed.
509 if (bfe_check_link(bfe
)) {
510 bfe_report_link(bfe
);
511 if (bfe
->bfe_chip
.link
== LINK_STATE_UP
) {
514 val
= INL(bfe
, BFE_TX_CTRL
);
515 val
&= ~BFE_TX_DUPLEX
;
516 if (bfe
->bfe_chip
.duplex
== LINK_DUPLEX_FULL
) {
517 val
|= BFE_TX_DUPLEX
;
518 flow
= INL(bfe
, BFE_RXCONF
);
519 flow
&= ~BFE_RXCONF_FLOW
;
520 OUTL(bfe
, BFE_RXCONF
, flow
);
522 flow
= INL(bfe
, BFE_MAC_FLOW
);
523 flow
&= ~(BFE_FLOW_RX_HIWAT
);
524 OUTL(bfe
, BFE_MAC_FLOW
, flow
);
529 OUTL(bfe
, BFE_TX_CTRL
, val
);
530 DTRACE_PROBE1(link__up
,
536 rw_exit(&bfe
->bfe_rwlock
);
539 mac_tx_update(bfe
->bfe_machdl
);
546 bfe_startup_phy(bfe_t
*bfe
)
548 uint16_t bmsr
, bmcr
, anar
;
552 if (bfe_probe_phy(bfe
) == BFE_FAILURE
) {
553 bfe
->bfe_phy_state
= BFE_PHY_NOTFOUND
;
554 return (BFE_FAILURE
);
557 (void) bfe_reset_phy(bfe
);
559 phyid1
= bfe_read_phy(bfe
, MII_PHYIDH
);
560 phyid2
= bfe_read_phy(bfe
, MII_PHYIDL
);
561 bfe
->bfe_phy_id
= (phyid1
<< 16) | phyid2
;
563 bmsr
= bfe_read_phy(bfe
, MII_STATUS
);
564 anar
= bfe_read_phy(bfe
, MII_AN_ADVERT
);
567 anar
&= ~(MII_ABILITY_100BASE_T4
|
568 MII_ABILITY_100BASE_TX_FD
| MII_ABILITY_100BASE_TX
|
569 MII_ABILITY_10BASE_T_FD
| MII_ABILITY_10BASE_T
);
572 * Supported hardware modes are in bmsr.
574 bfe
->bfe_chip
.bmsr
= bmsr
;
577 * Assume no capabilities are supported in the hardware.
579 bfe
->bfe_cap_aneg
= bfe
->bfe_cap_100T4
=
580 bfe
->bfe_cap_100fdx
= bfe
->bfe_cap_100hdx
=
581 bfe
->bfe_cap_10fdx
= bfe
->bfe_cap_10hdx
= 0;
584 * Assume property is set.
587 if (!(bfe
->bfe_chip_action
& BFE_ACTION_RESTART_SETPROP
)) {
589 * Property is not set which means bfe_mac_setprop()
590 * is not called on us.
597 if (bmsr
& MII_STATUS_100_BASEX_FD
) {
598 bfe
->bfe_cap_100fdx
= 1;
600 anar
|= MII_ABILITY_100BASE_TX_FD
;
601 bfe
->bfe_adv_100fdx
= 1;
603 } else if (bfe
->bfe_adv_100fdx
) {
604 anar
|= MII_ABILITY_100BASE_TX_FD
;
609 if (bmsr
& MII_STATUS_100_BASE_T4
) {
610 bfe
->bfe_cap_100T4
= 1;
612 anar
|= MII_ABILITY_100BASE_T4
;
613 bfe
->bfe_adv_100T4
= 1;
615 } else if (bfe
->bfe_adv_100T4
) {
616 anar
|= MII_ABILITY_100BASE_T4
;
621 if (bmsr
& MII_STATUS_100_BASEX
) {
622 bfe
->bfe_cap_100hdx
= 1;
624 anar
|= MII_ABILITY_100BASE_TX
;
625 bfe
->bfe_adv_100hdx
= 1;
627 } else if (bfe
->bfe_adv_100hdx
) {
628 anar
|= MII_ABILITY_100BASE_TX
;
633 if (bmsr
& MII_STATUS_10_FD
) {
634 bfe
->bfe_cap_10fdx
= 1;
636 anar
|= MII_ABILITY_10BASE_T_FD
;
637 bfe
->bfe_adv_10fdx
= 1;
639 } else if (bfe
->bfe_adv_10fdx
) {
640 anar
|= MII_ABILITY_10BASE_T_FD
;
645 if (bmsr
& MII_STATUS_10
) {
646 bfe
->bfe_cap_10hdx
= 1;
648 anar
|= MII_ABILITY_10BASE_T
;
649 bfe
->bfe_adv_10hdx
= 1;
651 } else if (bfe
->bfe_adv_10hdx
) {
652 anar
|= MII_ABILITY_10BASE_T
;
657 if (bmsr
& MII_STATUS_CANAUTONEG
) {
658 bfe
->bfe_cap_aneg
= 1;
660 bfe
->bfe_adv_aneg
= 1;
666 bfe_error(bfe
->bfe_dip
,
667 "No valid link mode selected. Powering down PHY");
669 bfe_report_link(bfe
);
670 return (BFE_FAILURE
);
674 * If property is set then user would have goofed up. So we
675 * go back to default properties.
677 bfe
->bfe_chip_action
&= ~BFE_ACTION_RESTART_SETPROP
;
681 if (bfe
->bfe_adv_aneg
&& (bmsr
& MII_STATUS_CANAUTONEG
)) {
682 bmcr
= (MII_CONTROL_ANE
| MII_CONTROL_RSAN
);
684 if (bfe
->bfe_adv_100fdx
)
685 bmcr
= (MII_CONTROL_100MB
| MII_CONTROL_FDUPLEX
);
686 else if (bfe
->bfe_adv_100hdx
)
687 bmcr
= MII_CONTROL_100MB
;
688 else if (bfe
->bfe_adv_10fdx
)
689 bmcr
= MII_CONTROL_FDUPLEX
;
691 bmcr
= 0; /* 10HDX */
695 bfe_write_phy(bfe
, MII_AN_ADVERT
, anar
);
698 bfe_write_phy(bfe
, MII_CONTROL
, bmcr
);
700 bfe
->bfe_mii_anar
= anar
;
701 bfe
->bfe_mii_bmcr
= bmcr
;
702 bfe
->bfe_phy_state
= BFE_PHY_STARTED
;
704 if (bfe
->bfe_periodic_id
== NULL
) {
705 bfe
->bfe_periodic_id
= ddi_periodic_add(bfe_timeout
,
706 (void *)bfe
, BFE_TIMEOUT_INTERVAL
, DDI_IPL_0
);
708 DTRACE_PROBE1(first__timeout
, int, bfe
->bfe_unit
);
711 DTRACE_PROBE4(phy_started
, int, bfe
->bfe_unit
,
712 int, bmsr
, int, bmcr
, int, anar
);
714 return (BFE_SUCCESS
);
718 * Reports link status back to MAC Layer.
721 bfe_report_link(bfe_t
*bfe
)
723 mac_link_update(bfe
->bfe_machdl
, bfe
->bfe_chip
.link
);
727 * Reads PHY/MII registers and get the link status for us.
730 bfe_check_link(bfe_t
*bfe
)
732 uint16_t bmsr
, bmcr
, anar
, anlpar
;
733 int speed
, duplex
, link
;
735 speed
= bfe
->bfe_chip
.speed
;
736 duplex
= bfe
->bfe_chip
.duplex
;
737 link
= bfe
->bfe_chip
.link
;
739 bmsr
= bfe_read_phy(bfe
, MII_STATUS
);
740 bfe
->bfe_mii_bmsr
= bmsr
;
742 bmcr
= bfe_read_phy(bfe
, MII_CONTROL
);
744 anar
= bfe_read_phy(bfe
, MII_AN_ADVERT
);
745 bfe
->bfe_mii_anar
= anar
;
747 anlpar
= bfe_read_phy(bfe
, MII_AN_LPABLE
);
748 bfe
->bfe_mii_anlpar
= anlpar
;
750 bfe
->bfe_mii_exp
= bfe_read_phy(bfe
, MII_AN_EXPANSION
);
753 * If exp register is not present in PHY.
755 if (bfe
->bfe_mii_exp
== 0xffff) {
756 bfe
->bfe_mii_exp
= 0;
759 if ((bmsr
& MII_STATUS_LINKUP
) == 0) {
760 bfe
->bfe_chip
.link
= LINK_STATE_DOWN
;
761 bfe
->bfe_chip
.speed
= 0;
762 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_UNKNOWN
;
766 bfe
->bfe_chip
.link
= LINK_STATE_UP
;
768 if (!(bmcr
& MII_CONTROL_ANE
)) {
770 if (bmcr
& MII_CONTROL_100MB
)
771 bfe
->bfe_chip
.speed
= 100000000;
773 bfe
->bfe_chip
.speed
= 10000000;
775 if (bmcr
& MII_CONTROL_FDUPLEX
)
776 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_FULL
;
778 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_HALF
;
780 } else if ((!(bmsr
& MII_STATUS_CANAUTONEG
)) ||
781 (!(bmsr
& MII_STATUS_ANDONE
))) {
782 bfe
->bfe_chip
.speed
= 0;
783 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_UNKNOWN
;
784 } else if (anar
& anlpar
& MII_ABILITY_100BASE_TX_FD
) {
785 bfe
->bfe_chip
.speed
= 100000000;
786 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_FULL
;
787 } else if (anar
& anlpar
& MII_ABILITY_100BASE_T4
) {
788 bfe
->bfe_chip
.speed
= 100000000;
789 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_HALF
;
790 } else if (anar
& anlpar
& MII_ABILITY_100BASE_TX
) {
791 bfe
->bfe_chip
.speed
= 100000000;
792 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_HALF
;
793 } else if (anar
& anlpar
& MII_ABILITY_10BASE_T_FD
) {
794 bfe
->bfe_chip
.speed
= 10000000;
795 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_FULL
;
796 } else if (anar
& anlpar
& MII_ABILITY_10BASE_T
) {
797 bfe
->bfe_chip
.speed
= 10000000;
798 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_HALF
;
800 bfe
->bfe_chip
.speed
= 0;
801 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_UNKNOWN
;
806 * If speed or link status or duplex mode changed then report to
807 * MAC layer which is done by the caller.
809 if (speed
!= bfe
->bfe_chip
.speed
||
810 duplex
!= bfe
->bfe_chip
.duplex
||
811 link
!= bfe
->bfe_chip
.link
) {
819 bfe_cam_write(bfe_t
*bfe
, uchar_t
*d
, int index
)
823 v
= ((uint32_t)d
[2] << 24);
824 v
|= ((uint32_t)d
[3] << 16);
825 v
|= ((uint32_t)d
[4] << 8);
828 OUTL(bfe
, BFE_CAM_DATA_LO
, v
);
829 v
= (BFE_CAM_HI_VALID
|
830 (((uint32_t)d
[0]) << 8) |
833 OUTL(bfe
, BFE_CAM_DATA_HI
, v
);
834 OUTL(bfe
, BFE_CAM_CTRL
, (BFE_CAM_WRITE
|
835 ((uint32_t)index
<< BFE_CAM_INDEX_SHIFT
)));
836 (void) bfe_wait_bit(bfe
, BFE_CAM_CTRL
, BFE_CAM_BUSY
, 10, 1);
840 * Chip related functions (halt, reset, start).
843 bfe_chip_halt(bfe_t
*bfe
)
846 * Disables interrupts.
848 OUTL(bfe
, BFE_INTR_MASK
, 0);
849 FLUSH(bfe
, BFE_INTR_MASK
);
851 OUTL(bfe
, BFE_ENET_CTRL
, BFE_ENET_DISABLE
);
854 * Wait until TX and RX finish their job.
856 (void) bfe_wait_bit(bfe
, BFE_ENET_CTRL
, BFE_ENET_DISABLE
, 20, 1);
859 * Disables DMA engine.
861 OUTL(bfe
, BFE_DMARX_CTRL
, 0);
862 OUTL(bfe
, BFE_DMATX_CTRL
, 0);
866 bfe
->bfe_chip_state
= BFE_CHIP_HALT
;
870 bfe_chip_restart(bfe_t
*bfe
)
872 DTRACE_PROBE2(chip__restart
, int, bfe
->bfe_unit
,
873 int, bfe
->bfe_chip_action
);
880 bfe
->bfe_chip_state
= BFE_CHIP_STOPPED
;
888 * Reset chip and start PHY.
893 * DMA descriptor rings.
895 bfe_tx_desc_init(&bfe
->bfe_tx_ring
);
896 bfe_rx_desc_init(&bfe
->bfe_rx_ring
);
898 bfe
->bfe_chip_state
= BFE_CHIP_ACTIVE
;
899 bfe_set_rx_mode(bfe
);
900 bfe_enable_chip_intrs(bfe
);
904 * Disables core by stopping the clock.
907 bfe_core_disable(bfe_t
*bfe
)
909 if ((INL(bfe
, BFE_SBTMSLOW
) & BFE_RESET
))
912 OUTL(bfe
, BFE_SBTMSLOW
, (BFE_REJECT
| BFE_CLOCK
));
913 (void) bfe_wait_bit(bfe
, BFE_SBTMSLOW
, BFE_REJECT
, 100, 0);
914 (void) bfe_wait_bit(bfe
, BFE_SBTMSHIGH
, BFE_BUSY
, 100, 1);
915 OUTL(bfe
, BFE_SBTMSLOW
, (BFE_FGC
| BFE_CLOCK
| BFE_REJECT
| BFE_RESET
));
916 FLUSH(bfe
, BFE_SBTMSLOW
);
918 OUTL(bfe
, BFE_SBTMSLOW
, (BFE_REJECT
| BFE_RESET
));
926 bfe_core_reset(bfe_t
*bfe
)
931 * First disable the core.
933 bfe_core_disable(bfe
);
935 OUTL(bfe
, BFE_SBTMSLOW
, (BFE_RESET
| BFE_CLOCK
| BFE_FGC
));
936 FLUSH(bfe
, BFE_SBTMSLOW
);
939 if (INL(bfe
, BFE_SBTMSHIGH
) & BFE_SERR
)
940 OUTL(bfe
, BFE_SBTMSHIGH
, 0);
942 val
= INL(bfe
, BFE_SBIMSTATE
);
943 if (val
& (BFE_IBE
| BFE_TO
))
944 OUTL(bfe
, BFE_SBIMSTATE
, val
& ~(BFE_IBE
| BFE_TO
));
946 OUTL(bfe
, BFE_SBTMSLOW
, (BFE_CLOCK
| BFE_FGC
));
947 FLUSH(bfe
, BFE_SBTMSLOW
);
950 OUTL(bfe
, BFE_SBTMSLOW
, BFE_CLOCK
);
951 FLUSH(bfe
, BFE_SBTMSLOW
);
956 bfe_setup_config(bfe_t
*bfe
, uint32_t cores
)
958 uint32_t bar_orig
, val
;
961 * Change bar0 window to map sbtopci registers.
963 bar_orig
= pci_config_get32(bfe
->bfe_conf_handle
, BFE_BAR0_WIN
);
964 pci_config_put32(bfe
->bfe_conf_handle
, BFE_BAR0_WIN
, BFE_REG_PCI
);
966 /* Just read it and don't do anything */
967 val
= INL(bfe
, BFE_SBIDHIGH
) & BFE_IDH_CORE
;
969 val
= INL(bfe
, BFE_SBINTVEC
);
971 OUTL(bfe
, BFE_SBINTVEC
, val
);
973 val
= INL(bfe
, BFE_SSB_PCI_TRANS_2
);
974 val
|= BFE_SSB_PCI_PREF
| BFE_SSB_PCI_BURST
;
975 OUTL(bfe
, BFE_SSB_PCI_TRANS_2
, val
);
978 * Restore bar0 window mapping.
980 pci_config_put32(bfe
->bfe_conf_handle
, BFE_BAR0_WIN
, bar_orig
);
984 * Resets chip and starts PHY.
987 bfe_chip_reset(bfe_t
*bfe
)
991 /* Set the interrupt vector for the enet core */
992 bfe_setup_config(bfe
, BFE_INTVEC_ENET0
);
994 /* check if core is up */
995 val
= INL(bfe
, BFE_SBTMSLOW
) &
996 (BFE_RESET
| BFE_REJECT
| BFE_CLOCK
);
998 if (val
== BFE_CLOCK
) {
999 OUTL(bfe
, BFE_RCV_LAZY
, 0);
1000 OUTL(bfe
, BFE_ENET_CTRL
, BFE_ENET_DISABLE
);
1001 (void) bfe_wait_bit(bfe
, BFE_ENET_CTRL
,
1002 BFE_ENET_DISABLE
, 10, 1);
1003 OUTL(bfe
, BFE_DMATX_CTRL
, 0);
1004 FLUSH(bfe
, BFE_DMARX_STAT
);
1005 drv_usecwait(20000); /* 20 milli seconds */
1006 if (INL(bfe
, BFE_DMARX_STAT
) & BFE_STAT_EMASK
) {
1007 (void) bfe_wait_bit(bfe
, BFE_DMARX_STAT
, BFE_STAT_SIDLE
,
1010 OUTL(bfe
, BFE_DMARX_CTRL
, 0);
1013 bfe_core_reset(bfe
);
1014 bfe_clear_stats(bfe
);
1016 OUTL(bfe
, BFE_MDIO_CTRL
, 0x8d);
1017 val
= INL(bfe
, BFE_DEVCTRL
);
1018 if (!(val
& BFE_IPP
))
1019 OUTL(bfe
, BFE_ENET_CTRL
, BFE_ENET_EPSEL
);
1020 else if (INL(bfe
, BFE_DEVCTRL
& BFE_EPR
)) {
1021 OUTL_AND(bfe
, BFE_DEVCTRL
, ~BFE_EPR
);
1022 drv_usecwait(20000); /* 20 milli seconds */
1025 OUTL_OR(bfe
, BFE_MAC_CTRL
, BFE_CTRL_CRC32_ENAB
| BFE_CTRL_LED
);
1027 OUTL_AND(bfe
, BFE_MAC_CTRL
, ~BFE_CTRL_PDOWN
);
1029 OUTL(bfe
, BFE_RCV_LAZY
, ((1 << BFE_LAZY_FC_SHIFT
) &
1032 OUTL_OR(bfe
, BFE_RCV_LAZY
, 0);
1034 OUTL(bfe
, BFE_RXMAXLEN
, bfe
->bfe_rx_ring
.r_buf_len
);
1035 OUTL(bfe
, BFE_TXMAXLEN
, bfe
->bfe_tx_ring
.r_buf_len
);
1037 OUTL(bfe
, BFE_TX_WMARK
, 56);
1039 /* Program DMA channels */
1040 OUTL(bfe
, BFE_DMATX_CTRL
, BFE_TX_CTRL_ENABLE
);
1043 * DMA addresses need to be added to BFE_PCI_DMA
1045 OUTL(bfe
, BFE_DMATX_ADDR
,
1046 bfe
->bfe_tx_ring
.r_desc_cookie
.dmac_laddress
+ BFE_PCI_DMA
);
1048 OUTL(bfe
, BFE_DMARX_CTRL
, (BFE_RX_OFFSET
<< BFE_RX_CTRL_ROSHIFT
)
1049 | BFE_RX_CTRL_ENABLE
);
1051 OUTL(bfe
, BFE_DMARX_ADDR
,
1052 bfe
->bfe_rx_ring
.r_desc_cookie
.dmac_laddress
+ BFE_PCI_DMA
);
1054 (void) bfe_startup_phy(bfe
);
1056 bfe
->bfe_chip_state
= BFE_CHIP_INITIALIZED
;
1060 * It enables interrupts. Should be the last step while starting chip.
1063 bfe_enable_chip_intrs(bfe_t
*bfe
)
1065 /* Enable the chip and core */
1066 OUTL(bfe
, BFE_ENET_CTRL
, BFE_ENET_ENABLE
);
1068 /* Enable interrupts */
1069 OUTL(bfe
, BFE_INTR_MASK
, BFE_IMASK_DEF
);
1073 * Common code to take care of setting RX side mode (filter).
1076 bfe_set_rx_mode(bfe_t
*bfe
)
1080 ether_addr_t mac
[ETHERADDRL
] = {0, 0, 0, 0, 0, 0};
1083 * We don't touch RX filter if we were asked to suspend. It's fine
1084 * if chip is not active (no interface is plumbed on us).
1086 if (bfe
->bfe_chip_state
== BFE_CHIP_SUSPENDED
)
1089 val
= INL(bfe
, BFE_RXCONF
);
1091 val
&= ~BFE_RXCONF_PROMISC
;
1092 val
&= ~BFE_RXCONF_DBCAST
;
1094 if ((bfe
->bfe_chip_mode
& BFE_RX_MODE_ENABLE
) == 0) {
1095 OUTL(bfe
, BFE_CAM_CTRL
, 0);
1096 FLUSH(bfe
, BFE_CAM_CTRL
);
1097 } else if (bfe
->bfe_chip_mode
& BFE_RX_MODE_PROMISC
) {
1098 val
|= BFE_RXCONF_PROMISC
;
1099 val
&= ~BFE_RXCONF_DBCAST
;
1101 if (bfe
->bfe_chip_state
== BFE_CHIP_ACTIVE
) {
1102 /* Flush everything */
1103 OUTL(bfe
, BFE_RXCONF
, val
|
1104 BFE_RXCONF_PROMISC
| BFE_RXCONF_ALLMULTI
);
1105 FLUSH(bfe
, BFE_RXCONF
);
1109 OUTL(bfe
, BFE_CAM_CTRL
, 0);
1110 FLUSH(bfe
, BFE_CAM_CTRL
);
1113 * We receive all multicast packets.
1115 val
|= BFE_RXCONF_ALLMULTI
;
1117 for (i
= 0; i
< BFE_MAX_MULTICAST_TABLE
- 1; i
++) {
1118 bfe_cam_write(bfe
, (uchar_t
*)mac
, i
);
1121 bfe_cam_write(bfe
, bfe
->bfe_ether_addr
, i
);
1124 OUTL_OR(bfe
, BFE_CAM_CTRL
, BFE_CAM_ENABLE
);
1125 FLUSH(bfe
, BFE_CAM_CTRL
);
1128 DTRACE_PROBE2(rx__mode__filter
, int, bfe
->bfe_unit
,
1131 OUTL(bfe
, BFE_RXCONF
, val
);
1132 FLUSH(bfe
, BFE_RXCONF
);
1136 * Reset various variable values to initial state.
1139 bfe_init_vars(bfe_t
*bfe
)
1141 bfe
->bfe_chip_mode
= BFE_RX_MODE_ENABLE
;
1143 /* Initial assumption */
1144 bfe
->bfe_chip
.link
= LINK_STATE_UNKNOWN
;
1145 bfe
->bfe_chip
.speed
= 0;
1146 bfe
->bfe_chip
.duplex
= LINK_DUPLEX_UNKNOWN
;
1148 bfe
->bfe_periodic_id
= NULL
;
1149 bfe
->bfe_chip_state
= BFE_CHIP_UNINITIALIZED
;
1151 bfe
->bfe_tx_stall_time
= 0;
1155 * Initializes TX side descriptor entries (bfe_desc_t). Each descriptor entry
1156 * has control (desc_ctl) and address (desc_addr) member.
1159 bfe_tx_desc_init(bfe_ring_t
*r
)
1164 for (i
= 0; i
< r
->r_ndesc
; i
++) {
1165 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[i
].desc_ctl
),
1166 (r
->r_buf_dma
[i
].len
& BFE_DESC_LEN
));
1169 * DMA addresses need to be added to BFE_PCI_DMA
1171 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[i
].desc_addr
),
1172 (r
->r_buf_dma
[i
].cookie
.dmac_laddress
+ BFE_PCI_DMA
));
1175 v
= GET_DESC(r
, (uint32_t *)&(r
->r_desc
[i
- 1].desc_ctl
));
1176 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[i
- 1].desc_ctl
),
1179 (void) SYNC_DESC(r
, 0, r
->r_ndesc
, DDI_DMA_SYNC_FORDEV
);
1182 r
->r_avail_desc
= TX_NUM_DESC
;
1187 * Initializes RX side descriptor entries (bfe_desc_t). Each descriptor entry
1188 * has control (desc_ctl) and address (desc_addr) member.
1191 bfe_rx_desc_init(bfe_ring_t
*r
)
1196 for (i
= 0; i
< r
->r_ndesc
; i
++) {
1197 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[i
].desc_ctl
),
1198 (r
->r_buf_dma
[i
].len
& BFE_DESC_LEN
));
1200 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[i
].desc_addr
),
1201 (r
->r_buf_dma
[i
].cookie
.dmac_laddress
+ BFE_PCI_DMA
));
1203 /* Initialize rx header (len, flags) */
1204 bzero(r
->r_buf_dma
[i
].addr
, sizeof (bfe_rx_header_t
));
1206 (void) SYNC_BUF(r
, i
, 0, sizeof (bfe_rx_header_t
),
1207 DDI_DMA_SYNC_FORDEV
);
1210 v
= GET_DESC(r
, (uint32_t *)&(r
->r_desc
[i
- 1].desc_ctl
));
1211 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[i
- 1].desc_ctl
),
1214 (void) SYNC_DESC(r
, 0, r
->r_ndesc
, DDI_DMA_SYNC_FORDEV
);
1216 /* TAIL of RX Descriptor */
1217 OUTL(r
->r_bfe
, BFE_DMARX_PTR
, ((i
) * sizeof (bfe_desc_t
)));
1220 r
->r_avail_desc
= RX_NUM_DESC
;
1224 bfe_chip_start(bfe_t
*bfe
)
1226 ASSERT_ALL_LOCKS(bfe
);
1229 * Stop the chip first & then Reset the chip. At last enable interrupts.
1235 * Reset chip and start PHY.
1237 bfe_chip_reset(bfe
);
1240 * Initailize Descriptor Rings.
1242 bfe_tx_desc_init(&bfe
->bfe_tx_ring
);
1243 bfe_rx_desc_init(&bfe
->bfe_rx_ring
);
1245 bfe
->bfe_chip_state
= BFE_CHIP_ACTIVE
;
1246 bfe
->bfe_chip_mode
|= BFE_RX_MODE_ENABLE
;
1247 bfe_set_rx_mode(bfe
);
1248 bfe_enable_chip_intrs(bfe
);
1250 /* Check link, speed and duplex mode */
1251 (void) bfe_check_link(bfe
);
1253 return (DDI_SUCCESS
);
1258 * Clear chip statistics.
1261 bfe_clear_stats(bfe_t
*bfe
)
1265 OUTL(bfe
, BFE_MIB_CTRL
, BFE_MIB_CLR_ON_READ
);
1268 * Stat registers are cleared by reading.
1270 for (r
= BFE_TX_GOOD_O
; r
<= BFE_TX_PAUSE
; r
+= 4)
1273 for (r
= BFE_RX_GOOD_O
; r
<= BFE_RX_NPAUSE
; r
+= 4)
1278 * Collect chip statistics.
1281 bfe_gather_stats(bfe_t
*bfe
)
1285 uint32_t txerr
= 0, rxerr
= 0, coll
= 0;
1287 v
= &bfe
->bfe_hw_stats
.tx_good_octets
;
1288 for (r
= BFE_TX_GOOD_O
; r
<= BFE_TX_PAUSE
; r
+= 4) {
1293 v
= &bfe
->bfe_hw_stats
.rx_good_octets
;
1294 for (r
= BFE_RX_GOOD_O
; r
<= BFE_RX_NPAUSE
; r
+= 4) {
1302 * tx_good_octets, tx_good_pkts, tx_octets
1303 * tx_pkts, tx_broadcast_pkts, tx_multicast_pkts
1304 * tx_len_64, tx_len_65_to_127, tx_len_128_to_255
1305 * tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max
1306 * tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts
1307 * tx_underruns, tx_total_cols, tx_single_cols
1308 * tx_multiple_cols, tx_excessive_cols, tx_late_cols
1309 * tx_defered, tx_carrier_lost, tx_pause_pkts
1313 * rx_good_octets, rx_good_pkts, rx_octets
1314 * rx_pkts, rx_broadcast_pkts, rx_multicast_pkts
1315 * rx_len_64, rx_len_65_to_127, rx_len_128_to_255
1316 * rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max
1317 * rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts
1318 * rx_missed_pkts, rx_crc_align_errs, rx_undersize
1319 * rx_crc_errs, rx_align_errs, rx_symbol_errs
1320 * rx_pause_pkts, rx_nonpause_pkts
1323 bfe
->bfe_stats
.ether_stat_carrier_errors
=
1324 bfe
->bfe_hw_stats
.tx_carrier_lost
;
1326 /* txerr += bfe->bfe_hw_stats.tx_carrier_lost; */
1328 bfe
->bfe_stats
.ether_stat_ex_collisions
=
1329 bfe
->bfe_hw_stats
.tx_excessive_cols
;
1330 txerr
+= bfe
->bfe_hw_stats
.tx_excessive_cols
;
1331 coll
+= bfe
->bfe_hw_stats
.tx_excessive_cols
;
1333 bfe
->bfe_stats
.ether_stat_fcs_errors
=
1334 bfe
->bfe_hw_stats
.rx_crc_errs
;
1335 rxerr
+= bfe
->bfe_hw_stats
.rx_crc_errs
;
1337 bfe
->bfe_stats
.ether_stat_first_collisions
=
1338 bfe
->bfe_hw_stats
.tx_single_cols
;
1339 coll
+= bfe
->bfe_hw_stats
.tx_single_cols
;
1340 bfe
->bfe_stats
.ether_stat_multi_collisions
=
1341 bfe
->bfe_hw_stats
.tx_multiple_cols
;
1342 coll
+= bfe
->bfe_hw_stats
.tx_multiple_cols
;
1344 bfe
->bfe_stats
.ether_stat_toolong_errors
=
1345 bfe
->bfe_hw_stats
.rx_oversize_pkts
;
1346 rxerr
+= bfe
->bfe_hw_stats
.rx_oversize_pkts
;
1348 bfe
->bfe_stats
.ether_stat_tooshort_errors
=
1349 bfe
->bfe_hw_stats
.rx_undersize
;
1350 rxerr
+= bfe
->bfe_hw_stats
.rx_undersize
;
1352 bfe
->bfe_stats
.ether_stat_tx_late_collisions
+=
1353 bfe
->bfe_hw_stats
.tx_late_cols
;
1355 bfe
->bfe_stats
.ether_stat_defer_xmts
+=
1356 bfe
->bfe_hw_stats
.tx_defered
;
1358 bfe
->bfe_stats
.ether_stat_macrcv_errors
+= rxerr
;
1359 bfe
->bfe_stats
.ether_stat_macxmt_errors
+= txerr
;
1361 bfe
->bfe_stats
.collisions
+= coll
;
1365 * Gets the state for dladm command and all.
1368 bfe_mac_getstat(void *arg
, uint_t stat
, uint64_t *val
)
1370 bfe_t
*bfe
= (bfe_t
*)arg
;
1374 rw_enter(&bfe
->bfe_rwlock
, RW_READER
);
1382 case MAC_STAT_IFSPEED
:
1384 * MAC layer will ask for IFSPEED first and hence we
1385 * collect it only once.
1387 if (bfe
->bfe_chip_state
== BFE_CHIP_ACTIVE
) {
1389 * Update stats from the hardware.
1391 bfe_gather_stats(bfe
);
1393 v
= bfe
->bfe_chip
.speed
;
1396 case ETHER_STAT_ADV_CAP_100T4
:
1397 v
= bfe
->bfe_adv_100T4
;
1400 case ETHER_STAT_ADV_CAP_100FDX
:
1401 v
= (bfe
->bfe_mii_anar
& MII_ABILITY_100BASE_TX_FD
) != 0;
1404 case ETHER_STAT_ADV_CAP_100HDX
:
1405 v
= (bfe
->bfe_mii_anar
& MII_ABILITY_100BASE_TX
) != 0;
1408 case ETHER_STAT_ADV_CAP_10FDX
:
1409 v
= (bfe
->bfe_mii_anar
& MII_ABILITY_10BASE_T_FD
) != 0;
1412 case ETHER_STAT_ADV_CAP_10HDX
:
1413 v
= (bfe
->bfe_mii_anar
& MII_ABILITY_10BASE_T
) != 0;
1416 case ETHER_STAT_ADV_CAP_ASMPAUSE
:
1420 case ETHER_STAT_ADV_CAP_AUTONEG
:
1421 v
= bfe
->bfe_adv_aneg
;
1424 case ETHER_STAT_ADV_CAP_PAUSE
:
1425 v
= (bfe
->bfe_mii_anar
& MII_ABILITY_PAUSE
) != 0;
1428 case ETHER_STAT_ADV_REMFAULT
:
1429 v
= (bfe
->bfe_mii_anar
& MII_AN_ADVERT_REMFAULT
) != 0;
1432 case ETHER_STAT_ALIGN_ERRORS
:
1434 v
= bfe
->bfe_stats
.ether_stat_align_errors
;
1437 case ETHER_STAT_CAP_100T4
:
1438 v
= (bfe
->bfe_mii_bmsr
& MII_STATUS_100_BASE_T4
) != 0;
1441 case ETHER_STAT_CAP_100FDX
:
1442 v
= (bfe
->bfe_mii_bmsr
& MII_STATUS_100_BASEX_FD
) != 0;
1445 case ETHER_STAT_CAP_100HDX
:
1446 v
= (bfe
->bfe_mii_bmsr
& MII_STATUS_100_BASEX
) != 0;
1449 case ETHER_STAT_CAP_10FDX
:
1450 v
= (bfe
->bfe_mii_bmsr
& MII_STATUS_10_FD
) != 0;
1453 case ETHER_STAT_CAP_10HDX
:
1454 v
= (bfe
->bfe_mii_bmsr
& MII_STATUS_10
) != 0;
1457 case ETHER_STAT_CAP_ASMPAUSE
:
1461 case ETHER_STAT_CAP_AUTONEG
:
1462 v
= ((bfe
->bfe_mii_bmsr
& MII_STATUS_CANAUTONEG
) != 0);
1465 case ETHER_STAT_CAP_PAUSE
:
1469 case ETHER_STAT_CAP_REMFAULT
:
1470 v
= (bfe
->bfe_mii_bmsr
& MII_STATUS_REMFAULT
) != 0;
1473 case ETHER_STAT_CARRIER_ERRORS
:
1474 v
= bfe
->bfe_stats
.ether_stat_carrier_errors
;
1477 case ETHER_STAT_JABBER_ERRORS
:
1481 case ETHER_STAT_DEFER_XMTS
:
1482 v
= bfe
->bfe_stats
.ether_stat_defer_xmts
;
1485 case ETHER_STAT_EX_COLLISIONS
:
1487 v
= bfe
->bfe_stats
.ether_stat_ex_collisions
;
1490 case ETHER_STAT_FCS_ERRORS
:
1492 v
= bfe
->bfe_stats
.ether_stat_fcs_errors
;
1495 case ETHER_STAT_FIRST_COLLISIONS
:
1497 v
= bfe
->bfe_stats
.ether_stat_first_collisions
;
1500 case ETHER_STAT_LINK_ASMPAUSE
:
1504 case ETHER_STAT_LINK_AUTONEG
:
1505 v
= (bfe
->bfe_mii_bmcr
& MII_CONTROL_ANE
) != 0 &&
1506 (bfe
->bfe_mii_bmsr
& MII_STATUS_ANDONE
) != 0;
1509 case ETHER_STAT_LINK_DUPLEX
:
1510 v
= bfe
->bfe_chip
.duplex
;
1513 case ETHER_STAT_LP_CAP_100T4
:
1514 v
= (bfe
->bfe_mii_anlpar
& MII_ABILITY_100BASE_T4
) != 0;
1517 case ETHER_STAT_LP_CAP_100FDX
:
1518 v
= (bfe
->bfe_mii_anlpar
& MII_ABILITY_100BASE_TX_FD
) != 0;
1521 case ETHER_STAT_LP_CAP_100HDX
:
1522 v
= (bfe
->bfe_mii_anlpar
& MII_ABILITY_100BASE_TX
) != 0;
1525 case ETHER_STAT_LP_CAP_10FDX
:
1526 v
= (bfe
->bfe_mii_anlpar
& MII_ABILITY_10BASE_T_FD
) != 0;
1529 case ETHER_STAT_LP_CAP_10HDX
:
1530 v
= (bfe
->bfe_mii_anlpar
& MII_ABILITY_10BASE_T
) != 0;
1533 case ETHER_STAT_LP_CAP_ASMPAUSE
:
1537 case ETHER_STAT_LP_CAP_AUTONEG
:
1538 v
= (bfe
->bfe_mii_exp
& MII_AN_EXP_LPCANAN
) != 0;
1541 case ETHER_STAT_LP_CAP_PAUSE
:
1542 v
= (bfe
->bfe_mii_anlpar
& MII_ABILITY_PAUSE
) != 0;
1545 case ETHER_STAT_LP_REMFAULT
:
1546 v
= (bfe
->bfe_mii_anlpar
& MII_STATUS_REMFAULT
) != 0;
1549 case ETHER_STAT_MACRCV_ERRORS
:
1550 v
= bfe
->bfe_stats
.ether_stat_macrcv_errors
;
1553 case ETHER_STAT_MACXMT_ERRORS
:
1554 v
= bfe
->bfe_stats
.ether_stat_macxmt_errors
;
1557 case ETHER_STAT_MULTI_COLLISIONS
:
1558 v
= bfe
->bfe_stats
.ether_stat_multi_collisions
;
1561 case ETHER_STAT_SQE_ERRORS
:
1565 case ETHER_STAT_TOOLONG_ERRORS
:
1566 v
= bfe
->bfe_stats
.ether_stat_toolong_errors
;
1569 case ETHER_STAT_TOOSHORT_ERRORS
:
1570 v
= bfe
->bfe_stats
.ether_stat_tooshort_errors
;
1573 case ETHER_STAT_TX_LATE_COLLISIONS
:
1574 v
= bfe
->bfe_stats
.ether_stat_tx_late_collisions
;
1577 case ETHER_STAT_XCVR_ADDR
:
1578 v
= bfe
->bfe_phy_addr
;
1581 case ETHER_STAT_XCVR_ID
:
1582 v
= bfe
->bfe_phy_id
;
1585 case MAC_STAT_BRDCSTRCV
:
1586 v
= bfe
->bfe_stats
.brdcstrcv
;
1589 case MAC_STAT_BRDCSTXMT
:
1590 v
= bfe
->bfe_stats
.brdcstxmt
;
1593 case MAC_STAT_MULTIXMT
:
1594 v
= bfe
->bfe_stats
.multixmt
;
1597 case MAC_STAT_COLLISIONS
:
1598 v
= bfe
->bfe_stats
.collisions
;
1601 case MAC_STAT_IERRORS
:
1602 v
= bfe
->bfe_stats
.ierrors
;
1605 case MAC_STAT_IPACKETS
:
1606 v
= bfe
->bfe_stats
.ipackets
;
1609 case MAC_STAT_MULTIRCV
:
1610 v
= bfe
->bfe_stats
.multircv
;
1613 case MAC_STAT_NORCVBUF
:
1614 v
= bfe
->bfe_stats
.norcvbuf
;
1617 case MAC_STAT_NOXMTBUF
:
1618 v
= bfe
->bfe_stats
.noxmtbuf
;
1621 case MAC_STAT_OBYTES
:
1622 v
= bfe
->bfe_stats
.obytes
;
1625 case MAC_STAT_OERRORS
:
1627 v
= bfe
->bfe_stats
.ether_stat_macxmt_errors
;
1630 case MAC_STAT_OPACKETS
:
1631 v
= bfe
->bfe_stats
.opackets
;
1634 case MAC_STAT_RBYTES
:
1635 v
= bfe
->bfe_stats
.rbytes
;
1638 case MAC_STAT_UNDERFLOWS
:
1639 v
= bfe
->bfe_stats
.underflows
;
1642 case MAC_STAT_OVERFLOWS
:
1643 v
= bfe
->bfe_stats
.overflows
;
1647 rw_exit(&bfe
->bfe_rwlock
);
1654 bfe_mac_getprop(void *arg
, const char *name
, mac_prop_id_t num
, uint_t sz
,
1657 bfe_t
*bfe
= (bfe_t
*)arg
;
1661 case MAC_PROP_DUPLEX
:
1662 ASSERT(sz
>= sizeof (link_duplex_t
));
1663 bcopy(&bfe
->bfe_chip
.duplex
, val
, sizeof (link_duplex_t
));
1666 case MAC_PROP_SPEED
:
1667 ASSERT(sz
>= sizeof (uint64_t));
1668 bcopy(&bfe
->bfe_chip
.speed
, val
, sizeof (uint64_t));
1671 case MAC_PROP_AUTONEG
:
1672 *(uint8_t *)val
= bfe
->bfe_adv_aneg
;
1675 case MAC_PROP_ADV_100FDX_CAP
:
1676 *(uint8_t *)val
= bfe
->bfe_adv_100fdx
;
1679 case MAC_PROP_EN_100FDX_CAP
:
1680 *(uint8_t *)val
= bfe
->bfe_adv_100fdx
;
1683 case MAC_PROP_ADV_100HDX_CAP
:
1684 *(uint8_t *)val
= bfe
->bfe_adv_100hdx
;
1687 case MAC_PROP_EN_100HDX_CAP
:
1688 *(uint8_t *)val
= bfe
->bfe_adv_100hdx
;
1691 case MAC_PROP_ADV_10FDX_CAP
:
1692 *(uint8_t *)val
= bfe
->bfe_adv_10fdx
;
1695 case MAC_PROP_EN_10FDX_CAP
:
1696 *(uint8_t *)val
= bfe
->bfe_adv_10fdx
;
1699 case MAC_PROP_ADV_10HDX_CAP
:
1700 *(uint8_t *)val
= bfe
->bfe_adv_10hdx
;
1703 case MAC_PROP_EN_10HDX_CAP
:
1704 *(uint8_t *)val
= bfe
->bfe_adv_10hdx
;
1707 case MAC_PROP_ADV_100T4_CAP
:
1708 *(uint8_t *)val
= bfe
->bfe_adv_100T4
;
1711 case MAC_PROP_EN_100T4_CAP
:
1712 *(uint8_t *)val
= bfe
->bfe_adv_100T4
;
1724 bfe_mac_propinfo(void *arg
, const char *name
, mac_prop_id_t num
,
1725 mac_prop_info_handle_t prh
)
1727 bfe_t
*bfe
= (bfe_t
*)arg
;
1730 case MAC_PROP_DUPLEX
:
1731 case MAC_PROP_SPEED
:
1732 case MAC_PROP_ADV_100FDX_CAP
:
1733 case MAC_PROP_ADV_100HDX_CAP
:
1734 case MAC_PROP_ADV_10FDX_CAP
:
1735 case MAC_PROP_ADV_10HDX_CAP
:
1736 case MAC_PROP_ADV_100T4_CAP
:
1737 case MAC_PROP_EN_100T4_CAP
:
1738 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
1741 case MAC_PROP_AUTONEG
:
1742 mac_prop_info_set_default_uint8(prh
, bfe
->bfe_cap_aneg
);
1745 case MAC_PROP_EN_100FDX_CAP
:
1746 mac_prop_info_set_default_uint8(prh
, bfe
->bfe_cap_100fdx
);
1749 case MAC_PROP_EN_100HDX_CAP
:
1750 mac_prop_info_set_default_uint8(prh
, bfe
->bfe_cap_100hdx
);
1753 case MAC_PROP_EN_10FDX_CAP
:
1754 mac_prop_info_set_default_uint8(prh
, bfe
->bfe_cap_10fdx
);
1757 case MAC_PROP_EN_10HDX_CAP
:
1758 mac_prop_info_set_default_uint8(prh
, bfe
->bfe_cap_10hdx
);
1766 bfe_mac_setprop(void *arg
, const char *name
, mac_prop_id_t num
, uint_t sz
,
1769 bfe_t
*bfe
= (bfe_t
*)arg
;
1775 case MAC_PROP_EN_100FDX_CAP
:
1776 advp
= &bfe
->bfe_adv_100fdx
;
1777 capp
= &bfe
->bfe_cap_100fdx
;
1780 case MAC_PROP_EN_100HDX_CAP
:
1781 advp
= &bfe
->bfe_adv_100hdx
;
1782 capp
= &bfe
->bfe_cap_100hdx
;
1785 case MAC_PROP_EN_10FDX_CAP
:
1786 advp
= &bfe
->bfe_adv_10fdx
;
1787 capp
= &bfe
->bfe_cap_10fdx
;
1790 case MAC_PROP_EN_10HDX_CAP
:
1791 advp
= &bfe
->bfe_adv_10hdx
;
1792 capp
= &bfe
->bfe_cap_10hdx
;
1795 case MAC_PROP_AUTONEG
:
1796 advp
= &bfe
->bfe_adv_aneg
;
1797 capp
= &bfe
->bfe_cap_aneg
;
1807 bfe_grab_locks(bfe
);
1809 if (*advp
!= *(const uint8_t *)val
) {
1810 *advp
= *(const uint8_t *)val
;
1812 bfe
->bfe_chip_action
= BFE_ACTION_RESTART_SETPROP
;
1813 if (bfe
->bfe_chip_state
== BFE_CHIP_ACTIVE
) {
1815 * We need to stop the timer before grabbing locks
1816 * otherwise we can land-up in deadlock with untimeout.
1818 bfe_stop_timer(bfe
);
1820 bfe
->bfe_chip_action
|= BFE_ACTION_RESTART
;
1822 bfe_chip_restart(bfe
);
1825 * We leave SETPROP because properties can be
1828 bfe
->bfe_chip_action
&= ~(BFE_ACTION_RESTART
);
1833 bfe_release_locks(bfe
);
1835 /* kick-off a potential stopped downstream */
1837 mac_tx_update(bfe
->bfe_machdl
);
1844 bfe_mac_set_ether_addr(void *arg
, const uint8_t *ea
)
1846 bfe_t
*bfe
= (bfe_t
*)arg
;
1848 bfe_grab_locks(bfe
);
1849 bcopy(ea
, bfe
->bfe_ether_addr
, ETHERADDRL
);
1850 bfe_set_rx_mode(bfe
);
1851 bfe_release_locks(bfe
);
1856 bfe_mac_start(void *arg
)
1858 bfe_t
*bfe
= (bfe_t
*)arg
;
1860 bfe_grab_locks(bfe
);
1861 if (bfe_chip_start(bfe
) == DDI_FAILURE
) {
1862 bfe_release_locks(bfe
);
1866 bfe_release_locks(bfe
);
1868 mac_tx_update(bfe
->bfe_machdl
);
1874 bfe_mac_stop(void *arg
)
1876 bfe_t
*bfe
= (bfe_t
*)arg
;
1879 * We need to stop the timer before grabbing locks otherwise
1880 * we can land-up in deadlock with untimeout.
1882 bfe_stop_timer(bfe
);
1884 bfe_grab_locks(bfe
);
1887 * First halt the chip by disabling interrupts.
1892 bfe
->bfe_chip_state
= BFE_CHIP_STOPPED
;
1895 * This will leave the PHY running.
1897 bfe_chip_reset(bfe
);
1900 * Disable RX register.
1902 bfe
->bfe_chip_mode
&= ~BFE_RX_MODE_ENABLE
;
1903 bfe_set_rx_mode(bfe
);
1905 bfe_release_locks(bfe
);
1909 * Send a packet down the wire.
1912 bfe_send_a_packet(bfe_t
*bfe
, mblk_t
*mp
)
1914 bfe_ring_t
*r
= &bfe
->bfe_tx_ring
;
1915 uint32_t cur
= r
->r_curr_desc
;
1917 size_t pktlen
= msgsize(mp
);
1921 ASSERT(MUTEX_HELD(&r
->r_lock
));
1924 if (pktlen
> r
->r_buf_len
) {
1926 return (BFE_SUCCESS
);
1930 * There is a big reason why we don't check for '0'. It becomes easy
1931 * for us to not roll over the ring since we are based on producer (tx)
1932 * and consumer (reclaim by an interrupt) model. Especially when we
1933 * run out of TX descriptor, chip will send a single interrupt and
1934 * both producer and consumer counter will be same. So we keep a
1935 * difference of 1 always.
1937 if (r
->r_avail_desc
<= 1) {
1938 bfe
->bfe_stats
.noxmtbuf
++;
1939 bfe
->bfe_tx_resched
= 1;
1940 return (BFE_FAILURE
);
1944 * Get the DMA buffer to hold packet.
1946 buf
= (uchar_t
*)r
->r_buf_dma
[cur
].addr
;
1948 mcopymsg(mp
, buf
); /* it also frees mp */
1951 * Gather statistics.
1954 if (bcmp(buf
, bfe_broadcast
, ETHERADDRL
) != 0)
1955 bfe
->bfe_stats
.multixmt
++;
1957 bfe
->bfe_stats
.brdcstxmt
++;
1959 bfe
->bfe_stats
.opackets
++;
1960 bfe
->bfe_stats
.obytes
+= pktlen
;
1964 * Program the DMA descriptor (start and end of frame are same).
1967 v
= (pktlen
& BFE_DESC_LEN
) | BFE_DESC_IOC
| BFE_DESC_SOF
|
1970 if (cur
== (TX_NUM_DESC
- 1))
1973 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[cur
].desc_ctl
), v
);
1976 * DMA addresses need to be added to BFE_PCI_DMA
1978 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[cur
].desc_addr
),
1979 (r
->r_buf_dma
[cur
].cookie
.dmac_laddress
+ BFE_PCI_DMA
));
1982 * Sync the packet data for the device.
1984 (void) SYNC_BUF(r
, cur
, 0, pktlen
, DDI_DMA_SYNC_FORDEV
);
1986 /* Move to next descriptor slot */
1987 BFE_INC_SLOT(next
, TX_NUM_DESC
);
1989 (void) SYNC_DESC(r
, 0, r
->r_ndesc
, DDI_DMA_SYNC_FORDEV
);
1991 r
->r_curr_desc
= next
;
1994 * The order should be 1,2,3,... for BFE_DMATX_PTR if 0,1,2,3,...
1995 * descriptor slot are being programmed.
1997 OUTL(bfe
, BFE_DMATX_PTR
, next
* sizeof (bfe_desc_t
));
1998 FLUSH(bfe
, BFE_DMATX_PTR
);
2003 * Let timeout know that it must reset the chip if a
2004 * packet is not sent down the wire for more than 5 seconds.
2006 bfe
->bfe_tx_stall_time
= gethrtime() + (5 * 1000000000ULL);
2008 return (BFE_SUCCESS
);
2012 bfe_mac_transmit_packet(void *arg
, mblk_t
*mp
)
2014 bfe_t
*bfe
= (bfe_t
*)arg
;
2015 bfe_ring_t
*r
= &bfe
->bfe_tx_ring
;
2018 mutex_enter(&r
->r_lock
);
2020 if (bfe
->bfe_chip_state
!= BFE_CHIP_ACTIVE
) {
2021 DTRACE_PROBE1(tx__chip__not__active
, int, bfe
->bfe_unit
);
2024 mutex_exit(&r
->r_lock
);
2029 while (mp
!= NULL
) {
2033 if (bfe_send_a_packet(bfe
, mp
) == BFE_FAILURE
) {
2040 mutex_exit(&r
->r_lock
);
2046 bfe_mac_set_promisc(void *arg
, boolean_t promiscflag
)
2048 bfe_t
*bfe
= (bfe_t
*)arg
;
2050 bfe_grab_locks(bfe
);
2051 if (bfe
->bfe_chip_state
!= BFE_CHIP_ACTIVE
) {
2052 bfe_release_locks(bfe
);
2057 /* Set Promiscous on */
2058 bfe
->bfe_chip_mode
|= BFE_RX_MODE_PROMISC
;
2060 bfe
->bfe_chip_mode
&= ~BFE_RX_MODE_PROMISC
;
2063 bfe_set_rx_mode(bfe
);
2064 bfe_release_locks(bfe
);
2070 bfe_mac_set_multicast(void *arg
, boolean_t add
, const uint8_t *macaddr
)
2073 * It was too much of pain to implement multicast in CAM. Instead
2074 * we never disable multicast filter.
2079 static mac_callbacks_t bfe_mac_callbacks
= {
2080 MC_SETPROP
| MC_GETPROP
| MC_PROPINFO
,
2081 bfe_mac_getstat
, /* gets stats */
2082 bfe_mac_start
, /* starts mac */
2083 bfe_mac_stop
, /* stops mac */
2084 bfe_mac_set_promisc
, /* sets promisc mode for snoop */
2085 bfe_mac_set_multicast
, /* multicast implementation */
2086 bfe_mac_set_ether_addr
, /* sets ethernet address (unicast) */
2087 bfe_mac_transmit_packet
, /* transmits packet */
2099 bfe_error_handler(bfe_t
*bfe
, int intr_mask
)
2103 if (intr_mask
& BFE_ISTAT_RFO
) {
2104 bfe
->bfe_stats
.overflows
++;
2105 bfe
->bfe_chip_action
|=
2106 (BFE_ACTION_RESTART
| BFE_ACTION_RESTART_FAULT
);
2110 if (intr_mask
& BFE_ISTAT_TFU
) {
2111 bfe
->bfe_stats
.underflows
++;
2115 /* Descriptor Protocol Error */
2116 if (intr_mask
& BFE_ISTAT_DPE
) {
2117 bfe_error(bfe
->bfe_dip
,
2118 "Descriptor Protocol Error. Halting Chip");
2119 bfe
->bfe_chip_action
|=
2120 (BFE_ACTION_RESTART
| BFE_ACTION_RESTART_FAULT
);
2124 /* Descriptor Error */
2125 if (intr_mask
& BFE_ISTAT_DSCE
) {
2126 bfe_error(bfe
->bfe_dip
, "Descriptor Error. Restarting Chip");
2130 /* Receive Descr. Underflow */
2131 if (intr_mask
& BFE_ISTAT_RDU
) {
2132 bfe_error(bfe
->bfe_dip
,
2133 "Receive Descriptor Underflow. Restarting Chip");
2134 bfe
->bfe_stats
.ether_stat_macrcv_errors
++;
2135 bfe
->bfe_chip_action
|=
2136 (BFE_ACTION_RESTART
| BFE_ACTION_RESTART_FAULT
);
2140 v
= INL(bfe
, BFE_DMATX_STAT
);
2142 /* Error while sending a packet */
2143 if (v
& BFE_STAT_EMASK
) {
2144 bfe
->bfe_stats
.ether_stat_macxmt_errors
++;
2145 bfe_error(bfe
->bfe_dip
,
2146 "Error while sending a packet. Restarting Chip");
2149 /* Error while receiving a packet */
2150 v
= INL(bfe
, BFE_DMARX_STAT
);
2151 if (v
& BFE_RX_FLAG_ERRORS
) {
2152 bfe
->bfe_stats
.ierrors
++;
2153 bfe_error(bfe
->bfe_dip
,
2154 "Error while receiving a packet. Restarting Chip");
2158 bfe
->bfe_chip_action
|=
2159 (BFE_ACTION_RESTART
| BFE_ACTION_RESTART_FAULT
);
2166 * It will recycle a RX descriptor slot.
2169 bfe_rx_desc_buf_reinit(bfe_t
*bfe
, uint_t slot
)
2171 bfe_ring_t
*r
= &bfe
->bfe_rx_ring
;
2174 slot
%= RX_NUM_DESC
;
2176 bzero(r
->r_buf_dma
[slot
].addr
, sizeof (bfe_rx_header_t
));
2178 (void) SYNC_BUF(r
, slot
, 0, BFE_RX_OFFSET
, DDI_DMA_SYNC_FORDEV
);
2180 v
= r
->r_buf_dma
[slot
].len
& BFE_DESC_LEN
;
2181 if (slot
== (RX_NUM_DESC
- 1))
2184 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[slot
].desc_ctl
), v
);
2187 * DMA addresses need to be added to BFE_PCI_DMA
2189 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[slot
].desc_addr
),
2190 (r
->r_buf_dma
[slot
].cookie
.dmac_laddress
+ BFE_PCI_DMA
));
2194 * Gets called from interrupt context to handle RX interrupt.
2197 bfe_receive(bfe_t
*bfe
, int intr_mask
)
2199 int rxstat
, current
;
2200 mblk_t
*mp
= NULL
, *rx_head
, *rx_tail
;
2204 bfe_ring_t
*r
= &bfe
->bfe_rx_ring
;
2207 rxstat
= INL(bfe
, BFE_DMARX_STAT
);
2208 current
= (rxstat
& BFE_STAT_CDMASK
) / sizeof (bfe_desc_t
);
2211 rx_head
= rx_tail
= NULL
;
2213 DTRACE_PROBE3(receive
, int, bfe
->bfe_unit
,
2214 int, r
->r_curr_desc
,
2217 for (i
= r
->r_curr_desc
; i
!= current
;
2218 BFE_INC_SLOT(i
, RX_NUM_DESC
)) {
2221 * Sync the buffer associated with the descriptor table entry.
2223 (void) SYNC_BUF(r
, i
, 0, r
->r_buf_dma
[i
].len
,
2224 DDI_DMA_SYNC_FORKERNEL
);
2226 rx_header
= (void *)r
->r_buf_dma
[i
].addr
;
2229 * We do this to make sure we are endian neutral. Chip is
2232 * The header looks like :-
2234 * Offset 0 -> uint16_t len
2235 * Offset 2 -> uint16_t flags
2236 * Offset 4 -> uint16_t pad[12]
2238 len
= (rx_header
[1] << 8) | rx_header
[0];
2239 len
-= 4; /* CRC bytes need to be removed */
2242 * Don't receive this packet if pkt length is greater than
2245 if (len
> r
->r_buf_len
) {
2246 /* Recycle slot for later use */
2247 bfe_rx_desc_buf_reinit(bfe
, i
);
2251 if ((mp
= allocb(len
+ VLAN_TAGSZ
, BPRI_MED
)) != NULL
) {
2252 mp
->b_rptr
+= VLAN_TAGSZ
;
2254 mp
->b_wptr
= bp
+ len
;
2256 /* sizeof (bfe_rx_header_t) + 2 */
2257 bcopy(r
->r_buf_dma
[i
].addr
+
2258 BFE_RX_OFFSET
, bp
, len
);
2261 if (rx_tail
== NULL
)
2262 rx_head
= rx_tail
= mp
;
2264 rx_tail
->b_next
= mp
;
2268 /* Number of packets received so far */
2269 bfe
->bfe_stats
.ipackets
++;
2271 /* Total bytes of packets received so far */
2272 bfe
->bfe_stats
.rbytes
+= len
;
2274 if (bcmp(mp
->b_rptr
, bfe_broadcast
, ETHERADDRL
) == 0)
2275 bfe
->bfe_stats
.brdcstrcv
++;
2277 bfe
->bfe_stats
.multircv
++;
2279 bfe
->bfe_stats
.norcvbuf
++;
2280 /* Recycle the slot for later use */
2281 bfe_rx_desc_buf_reinit(bfe
, i
);
2286 * Reinitialize the current descriptor slot's buffer so that
2289 bfe_rx_desc_buf_reinit(bfe
, i
);
2294 (void) SYNC_DESC(r
, 0, r
->r_ndesc
, DDI_DMA_SYNC_FORDEV
);
2300 bfe_tx_reclaim(bfe_ring_t
*r
)
2302 uint32_t cur
, start
;
2305 cur
= INL(r
->r_bfe
, BFE_DMATX_STAT
) & BFE_STAT_CDMASK
;
2306 cur
= cur
/ sizeof (bfe_desc_t
);
2309 * Start with the last descriptor consumed by the chip.
2311 start
= r
->r_cons_desc
;
2313 DTRACE_PROBE3(tx__reclaim
, int, r
->r_bfe
->bfe_unit
,
2318 * There will be at least one descriptor to process.
2320 while (start
!= cur
) {
2322 v
= r
->r_buf_dma
[start
].len
& BFE_DESC_LEN
;
2323 if (start
== (TX_NUM_DESC
- 1))
2326 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[start
].desc_ctl
), v
);
2327 PUT_DESC(r
, (uint32_t *)&(r
->r_desc
[start
].desc_addr
),
2328 (r
->r_buf_dma
[start
].cookie
.dmac_laddress
+ BFE_PCI_DMA
));
2330 /* Move to next descriptor in TX ring */
2331 BFE_INC_SLOT(start
, TX_NUM_DESC
);
2334 (void) ddi_dma_sync(r
->r_desc_dma_handle
,
2335 0, (r
->r_ndesc
* sizeof (bfe_desc_t
)),
2336 DDI_DMA_SYNC_FORDEV
);
2338 r
->r_cons_desc
= start
; /* consumed pointer */
2339 r
->r_bfe
->bfe_tx_stall_time
= 0;
2345 bfe_tx_done(bfe_t
*bfe
, int intr_mask
)
2347 bfe_ring_t
*r
= &bfe
->bfe_tx_ring
;
2350 mutex_enter(&r
->r_lock
);
2351 (void) bfe_tx_reclaim(r
);
2353 if (bfe
->bfe_tx_resched
) {
2355 bfe
->bfe_tx_resched
= 0;
2357 mutex_exit(&r
->r_lock
);
2363 * ISR for interrupt handling
2366 bfe_interrupt(caddr_t arg1
, caddr_t arg2
)
2368 bfe_t
*bfe
= (void *)arg1
;
2370 mblk_t
*rx_head
= NULL
;
2374 * Grab the lock to avoid stopping the chip while this interrupt
2377 rw_enter(&bfe
->bfe_rwlock
, RW_READER
);
2380 * It's necessary to read intr stat again because masking interrupt
2381 * register does not really mask interrupts coming from the chip.
2383 intr_stat
= INL(bfe
, BFE_INTR_STAT
);
2384 intr_stat
&= BFE_IMASK_DEF
;
2385 OUTL(bfe
, BFE_INTR_STAT
, intr_stat
);
2386 (void) INL(bfe
, BFE_INTR_STAT
);
2388 if (intr_stat
== 0) {
2389 rw_exit(&bfe
->bfe_rwlock
);
2390 return (DDI_INTR_UNCLAIMED
);
2393 DTRACE_PROBE2(bfe__interrupt
, int, bfe
->bfe_unit
,
2396 if (bfe
->bfe_chip_state
!= BFE_CHIP_ACTIVE
) {
2398 * If chip is suspended then we just return.
2400 if (bfe
->bfe_chip_state
== BFE_CHIP_SUSPENDED
) {
2401 rw_exit(&bfe
->bfe_rwlock
);
2402 DTRACE_PROBE1(interrupt__chip__is__suspend
, int,
2404 return (DDI_INTR_CLAIMED
);
2408 * Halt the chip again i.e basically disable interrupts.
2411 rw_exit(&bfe
->bfe_rwlock
);
2412 DTRACE_PROBE1(interrupt__chip__not__active
, int,
2414 return (DDI_INTR_CLAIMED
);
2417 /* A packet was received */
2418 if (intr_stat
& BFE_ISTAT_RX
) {
2419 rx_head
= bfe_receive(bfe
, intr_stat
);
2422 /* A packet was sent down the wire */
2423 if (intr_stat
& BFE_ISTAT_TX
) {
2424 resched
= bfe_tx_done(bfe
, intr_stat
);
2427 /* There was an error */
2428 if (intr_stat
& BFE_ISTAT_ERRORS
) {
2429 bfe_error_handler(bfe
, intr_stat
);
2432 rw_exit(&bfe
->bfe_rwlock
);
2435 * Pass the list of packets received from chip to MAC layer.
2438 mac_rx(bfe
->bfe_machdl
, 0, rx_head
);
2442 * Let the MAC start sending pkts to a potential stopped stream.
2445 mac_tx_update(bfe
->bfe_machdl
);
2447 return (DDI_INTR_CLAIMED
);
2451 * Removes registered interrupt handler.
2454 bfe_remove_intr(bfe_t
*bfe
)
2456 (void) ddi_intr_remove_handler(bfe
->bfe_intrhdl
);
2457 (void) ddi_intr_free(bfe
->bfe_intrhdl
);
2461 * Add an interrupt for the driver.
2464 bfe_add_intr(bfe_t
*bfe
)
2469 ret
= ddi_intr_alloc(bfe
->bfe_dip
, &bfe
->bfe_intrhdl
,
2470 DDI_INTR_TYPE_FIXED
, /* type */
2473 &nintrs
, /* actual nintrs */
2474 DDI_INTR_ALLOC_STRICT
);
2476 if (ret
!= DDI_SUCCESS
) {
2477 bfe_error(bfe
->bfe_dip
, "ddi_intr_alloc() failed"
2478 " : ret : %d", ret
);
2479 return (DDI_FAILURE
);
2482 ret
= ddi_intr_add_handler(bfe
->bfe_intrhdl
, bfe_interrupt
, bfe
, NULL
);
2483 if (ret
!= DDI_SUCCESS
) {
2484 bfe_error(bfe
->bfe_dip
, "ddi_intr_add_handler() failed");
2485 (void) ddi_intr_free(bfe
->bfe_intrhdl
);
2486 return (DDI_FAILURE
);
2489 ret
= ddi_intr_get_pri(bfe
->bfe_intrhdl
, &bfe
->bfe_intrpri
);
2490 if (ret
!= DDI_SUCCESS
) {
2491 bfe_error(bfe
->bfe_dip
, "ddi_intr_get_pri() failed");
2492 bfe_remove_intr(bfe
);
2493 return (DDI_FAILURE
);
2496 return (DDI_SUCCESS
);
2501 * Identify chipset family.
2504 bfe_identify_hardware(bfe_t
*bfe
)
2509 vid
= pci_config_get16(bfe
->bfe_conf_handle
, PCI_CONF_VENID
);
2510 did
= pci_config_get16(bfe
->bfe_conf_handle
, PCI_CONF_DEVID
);
2512 for (i
= 0; i
< (sizeof (bfe_cards
) / sizeof (bfe_cards_t
)); i
++) {
2513 if (bfe_cards
[i
].vendor_id
== vid
&&
2514 bfe_cards
[i
].device_id
== did
) {
2515 return (BFE_SUCCESS
);
2519 bfe_error(bfe
->bfe_dip
, "bfe driver is attaching to unknown pci%d,%d"
2520 " vendor/device-id card", vid
, did
);
2522 return (BFE_SUCCESS
);
2526 * Maps device registers.
2529 bfe_regs_map(bfe_t
*bfe
)
2531 dev_info_t
*dip
= bfe
->bfe_dip
;
2534 ret
= ddi_regs_map_setup(dip
, 1, &bfe
->bfe_mem_regset
.addr
, 0, 0,
2535 &bfe_dev_attr
, &bfe
->bfe_mem_regset
.hdl
);
2537 if (ret
!= DDI_SUCCESS
) {
2538 bfe_error(bfe
->bfe_dip
, "ddi_regs_map_setup failed");
2539 return (DDI_FAILURE
);
2542 return (DDI_SUCCESS
);
2546 bfe_unmap_regs(bfe_t
*bfe
)
2548 ddi_regs_map_free(&bfe
->bfe_mem_regset
.hdl
);
2552 bfe_get_chip_config(bfe_t
*bfe
)
2555 bfe
->bfe_dev_addr
[0] = bfe
->bfe_ether_addr
[0] =
2556 INB(bfe
, BFE_EEPROM_BASE
+ 79);
2558 bfe
->bfe_dev_addr
[1] = bfe
->bfe_ether_addr
[1] =
2559 INB(bfe
, BFE_EEPROM_BASE
+ 78);
2561 bfe
->bfe_dev_addr
[2] = bfe
->bfe_ether_addr
[2] =
2562 INB(bfe
, BFE_EEPROM_BASE
+ 81);
2564 bfe
->bfe_dev_addr
[3] = bfe
->bfe_ether_addr
[3] =
2565 INB(bfe
, BFE_EEPROM_BASE
+ 80);
2567 bfe
->bfe_dev_addr
[4] = bfe
->bfe_ether_addr
[4] =
2568 INB(bfe
, BFE_EEPROM_BASE
+ 83);
2570 bfe
->bfe_dev_addr
[5] = bfe
->bfe_ether_addr
[5] =
2571 INB(bfe
, BFE_EEPROM_BASE
+ 82);
2573 bfe
->bfe_phy_addr
= -1;
2575 return (DDI_SUCCESS
);
2579 * Ring Management routines
2582 bfe_ring_buf_alloc(bfe_t
*bfe
, bfe_ring_t
*r
, int slot
, int d
)
2587 err
= ddi_dma_alloc_handle(bfe
->bfe_dip
,
2588 &bfe_dma_attr_buf
, DDI_DMA_SLEEP
, NULL
,
2589 &r
->r_buf_dma
[slot
].handle
);
2591 if (err
!= DDI_SUCCESS
) {
2592 bfe_error(bfe
->bfe_dip
, " bfe_ring_buf_alloc() :"
2593 " alloc_handle failed");
2597 err
= ddi_dma_mem_alloc(r
->r_buf_dma
[slot
].handle
,
2598 r
->r_buf_len
, &bfe_buf_attr
, DDI_DMA_STREAMING
,
2599 DDI_DMA_SLEEP
, NULL
, &r
->r_buf_dma
[slot
].addr
,
2600 &r
->r_buf_dma
[slot
].len
,
2601 &r
->r_buf_dma
[slot
].acchdl
);
2603 if (err
!= DDI_SUCCESS
) {
2604 bfe_error(bfe
->bfe_dip
, " bfe_ring_buf_alloc() :"
2605 " mem_alloc failed :%d", err
);
2609 err
= ddi_dma_addr_bind_handle(r
->r_buf_dma
[slot
].handle
,
2610 NULL
, r
->r_buf_dma
[slot
].addr
,
2611 r
->r_buf_dma
[slot
].len
,
2612 (DDI_DMA_RDWR
| DDI_DMA_STREAMING
),
2613 DDI_DMA_SLEEP
, NULL
,
2614 &r
->r_buf_dma
[slot
].cookie
,
2617 if (err
!= DDI_DMA_MAPPED
) {
2618 bfe_error(bfe
->bfe_dip
, " bfe_ring_buf_alloc() :"
2619 " bind_handle failed");
2624 bfe_error(bfe
->bfe_dip
, " bfe_ring_buf_alloc() :"
2625 " more than one DMA cookie");
2626 (void) ddi_dma_unbind_handle(r
->r_buf_dma
[slot
].handle
);
2630 return (DDI_SUCCESS
);
2632 ddi_dma_mem_free(&r
->r_buf_dma
[slot
].acchdl
);
2634 ddi_dma_free_handle(&r
->r_buf_dma
[slot
].handle
);
2636 return (DDI_FAILURE
);
2640 bfe_ring_buf_free(bfe_ring_t
*r
, int slot
)
2642 if (r
->r_buf_dma
== NULL
)
2645 (void) ddi_dma_unbind_handle(r
->r_buf_dma
[slot
].handle
);
2646 ddi_dma_mem_free(&r
->r_buf_dma
[slot
].acchdl
);
2647 ddi_dma_free_handle(&r
->r_buf_dma
[slot
].handle
);
2651 bfe_buffer_free(bfe_ring_t
*r
)
2655 for (i
= 0; i
< r
->r_ndesc
; i
++) {
2656 bfe_ring_buf_free(r
, i
);
2661 bfe_ring_desc_free(bfe_ring_t
*r
)
2663 (void) ddi_dma_unbind_handle(r
->r_desc_dma_handle
);
2664 ddi_dma_mem_free(&r
->r_desc_acc_handle
);
2665 ddi_dma_free_handle(&r
->r_desc_dma_handle
);
2666 kmem_free(r
->r_buf_dma
, r
->r_ndesc
* sizeof (bfe_dma_t
));
2668 r
->r_buf_dma
= NULL
;
2674 bfe_ring_desc_alloc(bfe_t
*bfe
, bfe_ring_t
*r
, int d
)
2676 int err
, i
, fail
= 0;
2678 size_t size_krnl
= 0, size_dma
= 0, ring_len
= 0;
2679 ddi_dma_cookie_t cookie
;
2682 ASSERT(bfe
!= NULL
);
2684 size_krnl
= r
->r_ndesc
* sizeof (bfe_dma_t
);
2685 size_dma
= r
->r_ndesc
* sizeof (bfe_desc_t
);
2686 r
->r_buf_dma
= kmem_zalloc(size_krnl
, KM_SLEEP
);
2689 err
= ddi_dma_alloc_handle(bfe
->bfe_dip
, &bfe_dma_attr_desc
,
2690 DDI_DMA_SLEEP
, NULL
, &r
->r_desc_dma_handle
);
2692 if (err
!= DDI_SUCCESS
) {
2693 bfe_error(bfe
->bfe_dip
, "bfe_ring_desc_alloc() failed on"
2694 " ddi_dma_alloc_handle()");
2695 kmem_free(r
->r_buf_dma
, size_krnl
);
2696 return (DDI_FAILURE
);
2700 err
= ddi_dma_mem_alloc(r
->r_desc_dma_handle
,
2701 size_dma
, &bfe_buf_attr
,
2702 DDI_DMA_CONSISTENT
, DDI_DMA_SLEEP
, NULL
,
2703 &ring
, &ring_len
, &r
->r_desc_acc_handle
);
2705 if (err
!= DDI_SUCCESS
) {
2706 bfe_error(bfe
->bfe_dip
, "bfe_ring_desc_alloc() failed on"
2707 " ddi_dma_mem_alloc()");
2708 ddi_dma_free_handle(&r
->r_desc_dma_handle
);
2709 kmem_free(r
->r_buf_dma
, size_krnl
);
2710 return (DDI_FAILURE
);
2713 err
= ddi_dma_addr_bind_handle(r
->r_desc_dma_handle
,
2714 NULL
, ring
, ring_len
,
2715 DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
,
2716 DDI_DMA_SLEEP
, NULL
,
2719 if (err
!= DDI_SUCCESS
) {
2720 bfe_error(bfe
->bfe_dip
, "bfe_ring_desc_alloc() failed on"
2721 " ddi_dma_addr_bind_handle()");
2722 ddi_dma_mem_free(&r
->r_desc_acc_handle
);
2723 ddi_dma_free_handle(&r
->r_desc_dma_handle
);
2724 kmem_free(r
->r_buf_dma
, size_krnl
);
2725 return (DDI_FAILURE
);
2729 * We don't want to have multiple cookies. Descriptor should be
2730 * aligned to PAGESIZE boundary.
2734 /* The actual descriptor for the ring */
2735 r
->r_desc_len
= ring_len
;
2736 r
->r_desc_cookie
= cookie
;
2738 r
->r_desc
= (void *)ring
;
2740 bzero(r
->r_desc
, size_dma
);
2741 bzero(r
->r_desc
, ring_len
);
2743 /* For each descriptor, allocate a DMA buffer */
2745 for (i
= 0; i
< r
->r_ndesc
; i
++) {
2746 if (bfe_ring_buf_alloc(bfe
, r
, i
, d
) != DDI_SUCCESS
) {
2755 bfe_ring_buf_free(r
, i
);
2758 /* We don't need the descriptor anymore */
2759 bfe_ring_desc_free(r
);
2760 return (DDI_FAILURE
);
2763 return (DDI_SUCCESS
);
2767 bfe_rings_alloc(bfe_t
*bfe
)
2770 mutex_init(&bfe
->bfe_tx_ring
.r_lock
, NULL
, MUTEX_DRIVER
, NULL
);
2771 bfe
->bfe_tx_ring
.r_lockp
= &bfe
->bfe_tx_ring
.r_lock
;
2772 bfe
->bfe_tx_ring
.r_buf_len
= BFE_MTU
+ sizeof (struct ether_header
) +
2773 VLAN_TAGSZ
+ ETHERFCSL
;
2774 bfe
->bfe_tx_ring
.r_ndesc
= TX_NUM_DESC
;
2775 bfe
->bfe_tx_ring
.r_bfe
= bfe
;
2776 bfe
->bfe_tx_ring
.r_avail_desc
= TX_NUM_DESC
;
2779 mutex_init(&bfe
->bfe_rx_ring
.r_lock
, NULL
, MUTEX_DRIVER
, NULL
);
2780 bfe
->bfe_rx_ring
.r_lockp
= &bfe
->bfe_rx_ring
.r_lock
;
2781 bfe
->bfe_rx_ring
.r_buf_len
= BFE_MTU
+ sizeof (struct ether_header
) +
2782 VLAN_TAGSZ
+ ETHERFCSL
+ RX_HEAD_ROOM
;
2783 bfe
->bfe_rx_ring
.r_ndesc
= RX_NUM_DESC
;
2784 bfe
->bfe_rx_ring
.r_bfe
= bfe
;
2785 bfe
->bfe_rx_ring
.r_avail_desc
= RX_NUM_DESC
;
2787 /* Allocate TX Ring */
2788 if (bfe_ring_desc_alloc(bfe
, &bfe
->bfe_tx_ring
,
2789 DDI_DMA_WRITE
) != DDI_SUCCESS
)
2790 return (DDI_FAILURE
);
2792 /* Allocate RX Ring */
2793 if (bfe_ring_desc_alloc(bfe
, &bfe
->bfe_rx_ring
,
2794 DDI_DMA_READ
) != DDI_SUCCESS
) {
2795 cmn_err(CE_NOTE
, "RX ring allocation failed");
2796 bfe_ring_desc_free(&bfe
->bfe_tx_ring
);
2797 return (DDI_FAILURE
);
2800 bfe
->bfe_tx_ring
.r_flags
= BFE_RING_ALLOCATED
;
2801 bfe
->bfe_rx_ring
.r_flags
= BFE_RING_ALLOCATED
;
2803 return (DDI_SUCCESS
);
2807 bfe_resume(dev_info_t
*dip
)
2810 int err
= DDI_SUCCESS
;
2812 if ((bfe
= ddi_get_driver_private(dip
)) == NULL
) {
2813 bfe_error(dip
, "Unexpected error (no driver private data)"
2815 return (DDI_FAILURE
);
2819 * Grab all the locks first.
2821 bfe_grab_locks(bfe
);
2822 bfe
->bfe_chip_state
= BFE_CHIP_RESUME
;
2825 /* PHY will also start running */
2826 bfe_chip_reset(bfe
);
2827 if (bfe_chip_start(bfe
) == DDI_FAILURE
) {
2828 bfe_error(dip
, "Could not resume chip");
2832 bfe_release_locks(bfe
);
2834 if (err
== DDI_SUCCESS
)
2835 mac_tx_update(bfe
->bfe_machdl
);
2841 bfe_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
2845 mac_register_t
*macreg
;
2850 return (bfe_resume(dip
));
2856 return (DDI_FAILURE
);
2860 unit
= ddi_get_instance(dip
);
2862 bfe
= kmem_zalloc(sizeof (bfe_t
), KM_SLEEP
);
2864 bfe
->bfe_unit
= unit
;
2866 if (pci_config_setup(dip
, &bfe
->bfe_conf_handle
) != DDI_SUCCESS
) {
2867 bfe_error(dip
, "pci_config_setup failed");
2872 * Enable IO space, Bus Master and Memory Space accessess.
2874 ret
= pci_config_get16(bfe
->bfe_conf_handle
, PCI_CONF_COMM
);
2875 pci_config_put16(bfe
->bfe_conf_handle
, PCI_CONF_COMM
,
2876 PCI_COMM_IO
| PCI_COMM_MAE
| PCI_COMM_ME
| ret
);
2878 ddi_set_driver_private(dip
, bfe
);
2880 /* Identify hardware */
2881 if (bfe_identify_hardware(bfe
) == BFE_FAILURE
) {
2882 bfe_error(dip
, "Could not identify device");
2886 if (bfe_regs_map(bfe
) != DDI_SUCCESS
) {
2887 bfe_error(dip
, "Could not map device registers");
2891 (void) bfe_get_chip_config(bfe
);
2894 * Register with MAC layer
2896 if ((macreg
= mac_alloc(MAC_VERSION
)) == NULL
) {
2897 bfe_error(dip
, "mac_alloc() failed");
2901 macreg
->m_type_ident
= MAC_PLUGIN_IDENT_ETHER
;
2902 macreg
->m_driver
= bfe
;
2903 macreg
->m_dip
= dip
;
2904 macreg
->m_instance
= unit
;
2905 macreg
->m_src_addr
= bfe
->bfe_ether_addr
;
2906 macreg
->m_callbacks
= &bfe_mac_callbacks
;
2907 macreg
->m_min_sdu
= 0;
2908 macreg
->m_max_sdu
= ETHERMTU
;
2909 macreg
->m_margin
= VLAN_TAGSZ
;
2911 if ((ret
= mac_register(macreg
, &bfe
->bfe_machdl
)) != 0) {
2912 bfe_error(dip
, "mac_register() failed with %d error", ret
);
2919 rw_init(&bfe
->bfe_rwlock
, NULL
, RW_DRIVER
,
2920 DDI_INTR_PRI(bfe
->bfe_intrpri
));
2922 if (bfe_add_intr(bfe
) != DDI_SUCCESS
) {
2923 bfe_error(dip
, "Could not add interrupt");
2927 if (bfe_rings_alloc(bfe
) != DDI_SUCCESS
) {
2928 bfe_error(dip
, "Could not allocate TX/RX Ring");
2932 /* Init and then reset the chip */
2933 bfe
->bfe_chip_action
= 0;
2936 /* PHY will also start running */
2937 bfe_chip_reset(bfe
);
2940 * Even though we enable the interrupts here but chip's interrupt
2941 * is not enabled yet. It will be enabled once we plumb the interface.
2943 if (ddi_intr_enable(bfe
->bfe_intrhdl
) != DDI_SUCCESS
) {
2944 bfe_error(dip
, "Could not enable interrupt");
2948 return (DDI_SUCCESS
);
2951 bfe_remove_intr(bfe
);
2953 (void) mac_unregister(bfe
->bfe_machdl
);
2955 bfe_unmap_regs(bfe
);
2957 pci_config_teardown(&bfe
->bfe_conf_handle
);
2959 kmem_free(bfe
, sizeof (bfe_t
));
2960 return (DDI_FAILURE
);
2964 bfe_detach(dev_info_t
*devinfo
, ddi_detach_cmd_t cmd
)
2968 bfe
= ddi_get_driver_private(devinfo
);
2973 * We need to stop the timer before grabbing locks otherwise
2974 * we can land-up in deadlock with untimeout.
2976 bfe_stop_timer(bfe
);
2979 * First unregister with MAC layer before stopping DMA
2982 if (mac_unregister(bfe
->bfe_machdl
) != DDI_SUCCESS
)
2983 return (DDI_FAILURE
);
2985 bfe
->bfe_machdl
= NULL
;
2988 * Quiesce the chip first.
2990 bfe_grab_locks(bfe
);
2993 bfe_release_locks(bfe
);
2995 (void) ddi_intr_disable(bfe
->bfe_intrhdl
);
2997 /* Make sure timer is gone. */
2998 bfe_stop_timer(bfe
);
3001 * Free the DMA resources for buffer and then descriptors
3003 if (bfe
->bfe_tx_ring
.r_flags
== BFE_RING_ALLOCATED
) {
3005 bfe_buffer_free(&bfe
->bfe_tx_ring
);
3006 bfe_ring_desc_free(&bfe
->bfe_tx_ring
);
3009 if (bfe
->bfe_rx_ring
.r_flags
== BFE_RING_ALLOCATED
) {
3011 bfe_buffer_free(&bfe
->bfe_rx_ring
);
3012 bfe_ring_desc_free(&bfe
->bfe_rx_ring
);
3015 bfe_remove_intr(bfe
);
3016 bfe_unmap_regs(bfe
);
3017 pci_config_teardown(&bfe
->bfe_conf_handle
);
3019 mutex_destroy(&bfe
->bfe_tx_ring
.r_lock
);
3020 mutex_destroy(&bfe
->bfe_rx_ring
.r_lock
);
3021 rw_destroy(&bfe
->bfe_rwlock
);
3023 kmem_free(bfe
, sizeof (bfe_t
));
3025 ddi_set_driver_private(devinfo
, NULL
);
3026 return (DDI_SUCCESS
);
3030 * We need to stop the timer before grabbing locks otherwise
3031 * we can land-up in deadlock with untimeout.
3033 bfe_stop_timer(bfe
);
3036 * Grab all the locks first.
3038 bfe_grab_locks(bfe
);
3041 bfe
->bfe_chip_state
= BFE_CHIP_SUSPENDED
;
3042 bfe_release_locks(bfe
);
3044 return (DDI_SUCCESS
);
3047 return (DDI_FAILURE
);
3052 * Quiesce the card for fast reboot
3055 bfe_quiesce(dev_info_t
*dev_info
)
3059 bfe
= ddi_get_driver_private(dev_info
);
3063 bfe
->bfe_chip_state
= BFE_CHIP_QUIESCED
;
3065 return (DDI_SUCCESS
);
3068 static struct cb_ops bfe_cb_ops
= {
3069 nulldev
, /* cb_open */
3070 nulldev
, /* cb_close */
3071 nodev
, /* cb_strategy */
3072 nodev
, /* cb_print */
3073 nodev
, /* cb_dump */
3074 nodev
, /* cb_read */
3075 nodev
, /* cb_write */
3076 nodev
, /* cb_ioctl */
3077 nodev
, /* cb_devmap */
3078 nodev
, /* cb_mmap */
3079 nodev
, /* cb_segmap */
3080 nochpoll
, /* cb_chpoll */
3081 ddi_prop_op
, /* cb_prop_op */
3082 NULL
, /* cb_stream */
3083 D_MP
| D_HOTPLUG
, /* cb_flag */
3084 CB_REV
, /* cb_rev */
3085 nodev
, /* cb_aread */
3086 nodev
/* cb_awrite */
3089 static struct dev_ops bfe_dev_ops
= {
3090 DEVO_REV
, /* devo_rev */
3091 0, /* devo_refcnt */
3092 NULL
, /* devo_getinfo */
3093 nulldev
, /* devo_identify */
3094 nulldev
, /* devo_probe */
3095 bfe_attach
, /* devo_attach */
3096 bfe_detach
, /* devo_detach */
3097 nodev
, /* devo_reset */
3098 &bfe_cb_ops
, /* devo_cb_ops */
3099 NULL
, /* devo_bus_ops */
3100 ddi_power
, /* devo_power */
3101 bfe_quiesce
/* devo_quiesce */
3104 static struct modldrv bfe_modldrv
= {
3110 static struct modlinkage modlinkage
= {
3111 MODREV_1
, (void *)&bfe_modldrv
, NULL
3115 _info(struct modinfo
*modinfop
)
3117 return (mod_info(&modlinkage
, modinfop
));
3125 mac_init_ops(&bfe_dev_ops
, MODULE_NAME
);
3126 status
= mod_install(&modlinkage
);
3127 if (status
== DDI_FAILURE
)
3128 mac_fini_ops(&bfe_dev_ops
);
3137 status
= mod_remove(&modlinkage
);
3139 mac_fini_ops(&bfe_dev_ops
);