Merge remote-tracking branch 'origin/master'
[unleashed/lotheac.git] / usr / src / uts / common / io / bfe / bfe.c
blob5dfd8a8e25d006de62760b8697c90c654402178a
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
25 #include <sys/stream.h>
26 #include <sys/strsun.h>
27 #include <sys/stat.h>
28 #include <sys/pci.h>
29 #include <sys/modctl.h>
30 #include <sys/kstat.h>
31 #include <sys/ethernet.h>
32 #include <sys/devops.h>
33 #include <sys/debug.h>
34 #include <sys/conf.h>
35 #include <sys/sysmacros.h>
36 #include <sys/dditypes.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/miiregs.h>
40 #include <sys/byteorder.h>
41 #include <sys/cyclic.h>
42 #include <sys/note.h>
43 #include <sys/crc32.h>
44 #include <sys/mac_provider.h>
45 #include <sys/mac_ether.h>
46 #include <sys/vlan.h>
47 #include <sys/errno.h>
48 #include <sys/sdt.h>
49 #include <sys/strsubr.h>
51 #include "bfe.h"
52 #include "bfe_hw.h"
56 * Broadcom BCM4401 chipsets use two rings :
58 * - One TX : For sending packets down the wire.
59 * - One RX : For receiving packets.
61 * Each ring can have any number of descriptors (configured during attach).
62 * As of now we configure only 128 descriptor per ring (TX/RX). Each descriptor
63 * has address (desc_addr) and control (desc_ctl) which holds a DMA buffer for
64 * the packet and control information (like start/end of frame or end of table).
65 * The descriptor table is allocated first and then a DMA buffer (for a packet)
66 * is allocated and linked to each descriptor.
68 * Each descriptor entry is bfe_desc_t structure in bfe. During TX/RX
69 * interrupt, the stat register will point to current descriptor being
70 * processed.
72 * Here's an example of TX and RX ring :
74 * TX:
76 * Base of the descriptor table is programmed using BFE_DMATX_CTRL control
77 * register. Each 'addr' points to DMA buffer (or packet data buffer) to
78 * be transmitted and 'ctl' has the length of the packet (usually MTU).
80 * ----------------------|
81 * | addr |Descriptor 0 |
82 * | ctl | |
83 * ----------------------|
84 * | addr |Descriptor 1 | SOF (start of the frame)
85 * | ctl | |
86 * ----------------------|
87 * | ... |Descriptor... | EOF (end of the frame)
88 * | ... | |
89 * ----------------------|
90 * | addr |Descriptor 127|
91 * | ctl | EOT | EOT (End of Table)
92 * ----------------------|
94 * 'r_curr_desc' : pointer to current descriptor which can be used to transmit
95 * a packet.
96 * 'r_avail_desc' : decremented whenever a packet is being sent.
97 * 'r_cons_desc' : incremented whenever a packet is sent down the wire and
98 * notified by an interrupt to bfe driver.
100 * RX:
102 * Base of the descriptor table is programmed using BFE_DMARX_CTRL control
103 * register. Each 'addr' points to DMA buffer (or packet data buffer). 'ctl'
104 * contains the size of the DMA buffer and all the DMA buffers are
105 * pre-allocated during attach and hence the maximum size of the packet is
106 * also known (r_buf_len from the bfe_rint_t structure). During RX interrupt
107 * the packet length is embedded in bfe_header_t which is added by the
108 * chip in the beginning of the packet.
110 * ----------------------|
111 * | addr |Descriptor 0 |
112 * | ctl | |
113 * ----------------------|
114 * | addr |Descriptor 1 |
115 * | ctl | |
116 * ----------------------|
117 * | ... |Descriptor... |
118 * | ... | |
119 * ----------------------|
120 * | addr |Descriptor 127|
121 * | ctl | EOT | EOT (End of Table)
122 * ----------------------|
124 * 'r_curr_desc' : pointer to current descriptor while receiving a packet.
128 #define MODULE_NAME "bfe"
131 * Used for checking PHY (link state, speed)
133 #define BFE_TIMEOUT_INTERVAL (1000 * 1000 * 1000)
137 * Chip restart action and reason for restart
139 #define BFE_ACTION_RESTART 0x1 /* For restarting the chip */
140 #define BFE_ACTION_RESTART_SETPROP 0x2 /* restart due to setprop */
141 #define BFE_ACTION_RESTART_FAULT 0x4 /* restart due to fault */
142 #define BFE_ACTION_RESTART_PKT 0x8 /* restart due to pkt timeout */
144 static char bfe_ident[] = "bfe driver for Broadcom BCM4401 chipsets";
147 * Function Prototypes for bfe driver.
149 static int bfe_check_link(bfe_t *);
150 static void bfe_report_link(bfe_t *);
151 static void bfe_chip_halt(bfe_t *);
152 static void bfe_chip_reset(bfe_t *);
153 static void bfe_tx_desc_init(bfe_ring_t *);
154 static void bfe_rx_desc_init(bfe_ring_t *);
155 static void bfe_set_rx_mode(bfe_t *);
156 static void bfe_enable_chip_intrs(bfe_t *);
157 static void bfe_chip_restart(bfe_t *);
158 static void bfe_init_vars(bfe_t *);
159 static void bfe_clear_stats(bfe_t *);
160 static void bfe_gather_stats(bfe_t *);
161 static void bfe_error(dev_info_t *, char *, ...);
162 static int bfe_mac_getprop(void *, const char *, mac_prop_id_t, uint_t,
163 void *);
164 static int bfe_mac_setprop(void *, const char *, mac_prop_id_t, uint_t,
165 const void *);
166 static int bfe_tx_reclaim(bfe_ring_t *);
167 int bfe_mac_set_ether_addr(void *, const uint8_t *);
171 * Macros for ddi_dma_sync().
173 #define SYNC_DESC(r, s, l, d) \
174 (void) ddi_dma_sync(r->r_desc_dma_handle, \
175 (off_t)(s * sizeof (bfe_desc_t)), \
176 (size_t)(l * sizeof (bfe_desc_t)), \
179 #define SYNC_BUF(r, s, b, l, d) \
180 (void) ddi_dma_sync(r->r_buf_dma[s].handle, \
181 (off_t)(b), (size_t)(l), d)
184 * Supported Broadcom BCM4401 Cards.
186 static bfe_cards_t bfe_cards[] = {
187 { 0x14e4, 0x170c, "BCM4401 100Base-TX"},
192 * DMA attributes for device registers, packet data (buffer) and
193 * descriptor table.
195 static struct ddi_device_acc_attr bfe_dev_attr = {
196 DDI_DEVICE_ATTR_V0,
197 DDI_STRUCTURE_LE_ACC,
198 DDI_STRICTORDER_ACC
201 static struct ddi_device_acc_attr bfe_buf_attr = {
202 DDI_DEVICE_ATTR_V0,
203 DDI_NEVERSWAP_ACC, /* native endianness */
204 DDI_STRICTORDER_ACC
207 static ddi_dma_attr_t bfe_dma_attr_buf = {
208 DMA_ATTR_V0, /* dma_attr_version */
209 0, /* dma_attr_addr_lo */
210 BFE_PCI_DMA - 1, /* dma_attr_addr_hi */
211 0x1fff, /* dma_attr_count_max */
212 8, /* dma_attr_align */
213 0, /* dma_attr_burstsizes */
214 1, /* dma_attr_minxfer */
215 0x1fff, /* dma_attr_maxxfer */
216 BFE_PCI_DMA - 1, /* dma_attr_seg */
217 1, /* dma_attr_sgllen */
218 1, /* dma_attr_granular */
219 0 /* dma_attr_flags */
222 static ddi_dma_attr_t bfe_dma_attr_desc = {
223 DMA_ATTR_V0, /* dma_attr_version */
224 0, /* dma_attr_addr_lo */
225 BFE_PCI_DMA - 1, /* dma_attr_addr_hi */
226 BFE_PCI_DMA - 1, /* dma_attr_count_max */
227 BFE_DESC_ALIGN, /* dma_attr_align */
228 0, /* dma_attr_burstsizes */
229 1, /* dma_attr_minxfer */
230 BFE_PCI_DMA - 1, /* dma_attr_maxxfer */
231 BFE_PCI_DMA - 1, /* dma_attr_seg */
232 1, /* dma_attr_sgllen */
233 1, /* dma_attr_granular */
234 0 /* dma_attr_flags */
238 * Ethernet broadcast addresses.
240 static uchar_t bfe_broadcast[ETHERADDRL] = {
241 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
244 #define ASSERT_ALL_LOCKS(bfe) { \
245 ASSERT(mutex_owned(&bfe->bfe_tx_ring.r_lock)); \
246 ASSERT(rw_write_held(&bfe->bfe_rwlock)); \
250 * Debugging and error reporting code.
252 static void
253 bfe_error(dev_info_t *dip, char *fmt, ...)
255 va_list ap;
256 char buf[256];
258 va_start(ap, fmt);
259 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
260 va_end(ap);
262 if (dip) {
263 cmn_err(CE_WARN, "%s%d: %s",
264 ddi_driver_name(dip), ddi_get_instance(dip), buf);
265 } else {
266 cmn_err(CE_WARN, "bfe: %s", buf);
271 * Grabs all necessary locks to block any other operation on the chip.
273 static void
274 bfe_grab_locks(bfe_t *bfe)
276 bfe_ring_t *tx = &bfe->bfe_tx_ring;
279 * Grab all the locks.
280 * - bfe_rwlock : locks down whole chip including RX.
281 * - tx's r_lock : locks down only TX side.
283 rw_enter(&bfe->bfe_rwlock, RW_WRITER);
284 mutex_enter(&tx->r_lock);
287 * Note that we don't use RX's r_lock.
292 * Release locks on chip/driver.
294 static void
295 bfe_release_locks(bfe_t *bfe)
297 bfe_ring_t *tx = &bfe->bfe_tx_ring;
300 * Release all the locks in the order in which they were grabbed.
302 mutex_exit(&tx->r_lock);
303 rw_exit(&bfe->bfe_rwlock);
308 * It's used to make sure that the write to device register was successful.
310 static int
311 bfe_wait_bit(bfe_t *bfe, uint32_t reg, uint32_t bit,
312 ulong_t t, const int clear)
314 ulong_t i;
315 uint32_t v;
317 for (i = 0; i < t; i++) {
318 v = INL(bfe, reg);
320 if (clear && !(v & bit))
321 break;
323 if (!clear && (v & bit))
324 break;
326 drv_usecwait(10);
329 /* if device still didn't see the value */
330 if (i == t)
331 return (-1);
333 return (0);
337 * PHY functions (read, write, stop, reset and startup)
339 static int
340 bfe_read_phy(bfe_t *bfe, uint32_t reg)
342 OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
343 OUTL(bfe, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
344 (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) |
345 (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) |
346 (reg << BFE_MDIO_RA_SHIFT) |
347 (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));
349 (void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0);
351 return ((INL(bfe, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA));
354 static void
355 bfe_write_phy(bfe_t *bfe, uint32_t reg, uint32_t val)
357 OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
358 OUTL(bfe, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
359 (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) |
360 (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) |
361 (reg << BFE_MDIO_RA_SHIFT) |
362 (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) |
363 (val & BFE_MDIO_DATA_DATA)));
365 (void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0);
369 * It resets the PHY layer.
371 static int
372 bfe_reset_phy(bfe_t *bfe)
374 uint32_t i;
376 bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_RESET);
377 drv_usecwait(100);
378 for (i = 0; i < 10; i++) {
379 if (bfe_read_phy(bfe, MII_CONTROL) &
380 MII_CONTROL_RESET) {
381 drv_usecwait(500);
382 continue;
385 break;
388 if (i == 10) {
389 bfe_error(bfe->bfe_dip, "Timeout waiting for PHY to reset");
390 bfe->bfe_phy_state = BFE_PHY_RESET_TIMEOUT;
391 return (BFE_FAILURE);
394 bfe->bfe_phy_state = BFE_PHY_RESET_DONE;
396 return (BFE_SUCCESS);
400 * Make sure timer function is out of our way and especially during
401 * detach.
403 static void
404 bfe_stop_timer(bfe_t *bfe)
406 if (bfe->bfe_periodic_id) {
407 ddi_periodic_delete(bfe->bfe_periodic_id);
408 bfe->bfe_periodic_id = NULL;
413 * Stops the PHY
415 static void
416 bfe_stop_phy(bfe_t *bfe)
418 bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_PWRDN |
419 MII_CONTROL_ISOLATE);
421 bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
422 bfe->bfe_chip.speed = 0;
423 bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
425 bfe->bfe_phy_state = BFE_PHY_STOPPED;
428 * Report the link status to MAC layer.
430 if (bfe->bfe_machdl != NULL)
431 (void) bfe_report_link(bfe);
434 static int
435 bfe_probe_phy(bfe_t *bfe)
437 int phy;
438 uint32_t status;
440 if (bfe->bfe_phy_addr) {
441 status = bfe_read_phy(bfe, MII_STATUS);
442 if (status != 0xffff && status != 0) {
443 bfe_write_phy(bfe, MII_CONTROL, 0);
444 return (BFE_SUCCESS);
448 for (phy = 0; phy < 32; phy++) {
449 bfe->bfe_phy_addr = phy;
450 status = bfe_read_phy(bfe, MII_STATUS);
451 if (status != 0xffff && status != 0) {
452 bfe_write_phy(bfe, MII_CONTROL, 0);
453 return (BFE_SUCCESS);
457 return (BFE_FAILURE);
461 * This timeout function fires at BFE_TIMEOUT_INTERVAL to check the link
462 * status.
464 static void
465 bfe_timeout(void *arg)
467 bfe_t *bfe = (bfe_t *)arg;
468 int resched = 0;
471 * We don't grab any lock because bfe can't go away.
472 * untimeout() will wait for this timeout instance to complete.
474 if (bfe->bfe_chip_action & BFE_ACTION_RESTART) {
476 * Restart the chip.
478 bfe_grab_locks(bfe);
479 bfe_chip_restart(bfe);
480 bfe->bfe_chip_action &= ~BFE_ACTION_RESTART;
481 bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_FAULT;
482 bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_PKT;
483 bfe_release_locks(bfe);
484 mac_tx_update(bfe->bfe_machdl);
485 /* Restart will register a new timeout */
486 return;
489 rw_enter(&bfe->bfe_rwlock, RW_READER);
491 if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
492 hrtime_t hr;
494 hr = gethrtime();
495 if (bfe->bfe_tx_stall_time != 0 &&
496 hr > bfe->bfe_tx_stall_time) {
497 DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
498 char *, "pkt timeout");
499 bfe->bfe_chip_action |=
500 (BFE_ACTION_RESTART | BFE_ACTION_RESTART_PKT);
501 bfe->bfe_tx_stall_time = 0;
505 if (bfe->bfe_phy_state == BFE_PHY_STARTED) {
507 * Report the link status to MAC layer if link status changed.
509 if (bfe_check_link(bfe)) {
510 bfe_report_link(bfe);
511 if (bfe->bfe_chip.link == LINK_STATE_UP) {
512 uint32_t val, flow;
514 val = INL(bfe, BFE_TX_CTRL);
515 val &= ~BFE_TX_DUPLEX;
516 if (bfe->bfe_chip.duplex == LINK_DUPLEX_FULL) {
517 val |= BFE_TX_DUPLEX;
518 flow = INL(bfe, BFE_RXCONF);
519 flow &= ~BFE_RXCONF_FLOW;
520 OUTL(bfe, BFE_RXCONF, flow);
522 flow = INL(bfe, BFE_MAC_FLOW);
523 flow &= ~(BFE_FLOW_RX_HIWAT);
524 OUTL(bfe, BFE_MAC_FLOW, flow);
527 resched = 1;
529 OUTL(bfe, BFE_TX_CTRL, val);
530 DTRACE_PROBE1(link__up,
531 int, bfe->bfe_unit);
536 rw_exit(&bfe->bfe_rwlock);
538 if (resched)
539 mac_tx_update(bfe->bfe_machdl);
543 * Starts PHY layer.
545 static int
546 bfe_startup_phy(bfe_t *bfe)
548 uint16_t bmsr, bmcr, anar;
549 int prog, s;
550 int phyid1, phyid2;
552 if (bfe_probe_phy(bfe) == BFE_FAILURE) {
553 bfe->bfe_phy_state = BFE_PHY_NOTFOUND;
554 return (BFE_FAILURE);
557 (void) bfe_reset_phy(bfe);
559 phyid1 = bfe_read_phy(bfe, MII_PHYIDH);
560 phyid2 = bfe_read_phy(bfe, MII_PHYIDL);
561 bfe->bfe_phy_id = (phyid1 << 16) | phyid2;
563 bmsr = bfe_read_phy(bfe, MII_STATUS);
564 anar = bfe_read_phy(bfe, MII_AN_ADVERT);
566 again:
567 anar &= ~(MII_ABILITY_100BASE_T4 |
568 MII_ABILITY_100BASE_TX_FD | MII_ABILITY_100BASE_TX |
569 MII_ABILITY_10BASE_T_FD | MII_ABILITY_10BASE_T);
572 * Supported hardware modes are in bmsr.
574 bfe->bfe_chip.bmsr = bmsr;
577 * Assume no capabilities are supported in the hardware.
579 bfe->bfe_cap_aneg = bfe->bfe_cap_100T4 =
580 bfe->bfe_cap_100fdx = bfe->bfe_cap_100hdx =
581 bfe->bfe_cap_10fdx = bfe->bfe_cap_10hdx = 0;
584 * Assume property is set.
586 s = 1;
587 if (!(bfe->bfe_chip_action & BFE_ACTION_RESTART_SETPROP)) {
589 * Property is not set which means bfe_mac_setprop()
590 * is not called on us.
592 s = 0;
595 bmcr = prog = 0;
597 if (bmsr & MII_STATUS_100_BASEX_FD) {
598 bfe->bfe_cap_100fdx = 1;
599 if (s == 0) {
600 anar |= MII_ABILITY_100BASE_TX_FD;
601 bfe->bfe_adv_100fdx = 1;
602 prog++;
603 } else if (bfe->bfe_adv_100fdx) {
604 anar |= MII_ABILITY_100BASE_TX_FD;
605 prog++;
609 if (bmsr & MII_STATUS_100_BASE_T4) {
610 bfe->bfe_cap_100T4 = 1;
611 if (s == 0) {
612 anar |= MII_ABILITY_100BASE_T4;
613 bfe->bfe_adv_100T4 = 1;
614 prog++;
615 } else if (bfe->bfe_adv_100T4) {
616 anar |= MII_ABILITY_100BASE_T4;
617 prog++;
621 if (bmsr & MII_STATUS_100_BASEX) {
622 bfe->bfe_cap_100hdx = 1;
623 if (s == 0) {
624 anar |= MII_ABILITY_100BASE_TX;
625 bfe->bfe_adv_100hdx = 1;
626 prog++;
627 } else if (bfe->bfe_adv_100hdx) {
628 anar |= MII_ABILITY_100BASE_TX;
629 prog++;
633 if (bmsr & MII_STATUS_10_FD) {
634 bfe->bfe_cap_10fdx = 1;
635 if (s == 0) {
636 anar |= MII_ABILITY_10BASE_T_FD;
637 bfe->bfe_adv_10fdx = 1;
638 prog++;
639 } else if (bfe->bfe_adv_10fdx) {
640 anar |= MII_ABILITY_10BASE_T_FD;
641 prog++;
645 if (bmsr & MII_STATUS_10) {
646 bfe->bfe_cap_10hdx = 1;
647 if (s == 0) {
648 anar |= MII_ABILITY_10BASE_T;
649 bfe->bfe_adv_10hdx = 1;
650 prog++;
651 } else if (bfe->bfe_adv_10hdx) {
652 anar |= MII_ABILITY_10BASE_T;
653 prog++;
657 if (bmsr & MII_STATUS_CANAUTONEG) {
658 bfe->bfe_cap_aneg = 1;
659 if (s == 0) {
660 bfe->bfe_adv_aneg = 1;
664 if (prog == 0) {
665 if (s == 0) {
666 bfe_error(bfe->bfe_dip,
667 "No valid link mode selected. Powering down PHY");
668 bfe_stop_phy(bfe);
669 bfe_report_link(bfe);
670 return (BFE_FAILURE);
674 * If property is set then user would have goofed up. So we
675 * go back to default properties.
677 bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_SETPROP;
678 goto again;
681 if (bfe->bfe_adv_aneg && (bmsr & MII_STATUS_CANAUTONEG)) {
682 bmcr = (MII_CONTROL_ANE | MII_CONTROL_RSAN);
683 } else {
684 if (bfe->bfe_adv_100fdx)
685 bmcr = (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
686 else if (bfe->bfe_adv_100hdx)
687 bmcr = MII_CONTROL_100MB;
688 else if (bfe->bfe_adv_10fdx)
689 bmcr = MII_CONTROL_FDUPLEX;
690 else
691 bmcr = 0; /* 10HDX */
694 if (prog)
695 bfe_write_phy(bfe, MII_AN_ADVERT, anar);
697 if (bmcr)
698 bfe_write_phy(bfe, MII_CONTROL, bmcr);
700 bfe->bfe_mii_anar = anar;
701 bfe->bfe_mii_bmcr = bmcr;
702 bfe->bfe_phy_state = BFE_PHY_STARTED;
704 if (bfe->bfe_periodic_id == NULL) {
705 bfe->bfe_periodic_id = ddi_periodic_add(bfe_timeout,
706 (void *)bfe, BFE_TIMEOUT_INTERVAL, DDI_IPL_0);
708 DTRACE_PROBE1(first__timeout, int, bfe->bfe_unit);
711 DTRACE_PROBE4(phy_started, int, bfe->bfe_unit,
712 int, bmsr, int, bmcr, int, anar);
714 return (BFE_SUCCESS);
718 * Reports link status back to MAC Layer.
720 static void
721 bfe_report_link(bfe_t *bfe)
723 mac_link_update(bfe->bfe_machdl, bfe->bfe_chip.link);
727 * Reads PHY/MII registers and get the link status for us.
729 static int
730 bfe_check_link(bfe_t *bfe)
732 uint16_t bmsr, bmcr, anar, anlpar;
733 int speed, duplex, link;
735 speed = bfe->bfe_chip.speed;
736 duplex = bfe->bfe_chip.duplex;
737 link = bfe->bfe_chip.link;
739 bmsr = bfe_read_phy(bfe, MII_STATUS);
740 bfe->bfe_mii_bmsr = bmsr;
742 bmcr = bfe_read_phy(bfe, MII_CONTROL);
744 anar = bfe_read_phy(bfe, MII_AN_ADVERT);
745 bfe->bfe_mii_anar = anar;
747 anlpar = bfe_read_phy(bfe, MII_AN_LPABLE);
748 bfe->bfe_mii_anlpar = anlpar;
750 bfe->bfe_mii_exp = bfe_read_phy(bfe, MII_AN_EXPANSION);
753 * If exp register is not present in PHY.
755 if (bfe->bfe_mii_exp == 0xffff) {
756 bfe->bfe_mii_exp = 0;
759 if ((bmsr & MII_STATUS_LINKUP) == 0) {
760 bfe->bfe_chip.link = LINK_STATE_DOWN;
761 bfe->bfe_chip.speed = 0;
762 bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
763 goto done;
766 bfe->bfe_chip.link = LINK_STATE_UP;
768 if (!(bmcr & MII_CONTROL_ANE)) {
769 /* Forced mode */
770 if (bmcr & MII_CONTROL_100MB)
771 bfe->bfe_chip.speed = 100000000;
772 else
773 bfe->bfe_chip.speed = 10000000;
775 if (bmcr & MII_CONTROL_FDUPLEX)
776 bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
777 else
778 bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
780 } else if ((!(bmsr & MII_STATUS_CANAUTONEG)) ||
781 (!(bmsr & MII_STATUS_ANDONE))) {
782 bfe->bfe_chip.speed = 0;
783 bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
784 } else if (anar & anlpar & MII_ABILITY_100BASE_TX_FD) {
785 bfe->bfe_chip.speed = 100000000;
786 bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
787 } else if (anar & anlpar & MII_ABILITY_100BASE_T4) {
788 bfe->bfe_chip.speed = 100000000;
789 bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
790 } else if (anar & anlpar & MII_ABILITY_100BASE_TX) {
791 bfe->bfe_chip.speed = 100000000;
792 bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
793 } else if (anar & anlpar & MII_ABILITY_10BASE_T_FD) {
794 bfe->bfe_chip.speed = 10000000;
795 bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
796 } else if (anar & anlpar & MII_ABILITY_10BASE_T) {
797 bfe->bfe_chip.speed = 10000000;
798 bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
799 } else {
800 bfe->bfe_chip.speed = 0;
801 bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
804 done:
806 * If speed or link status or duplex mode changed then report to
807 * MAC layer which is done by the caller.
809 if (speed != bfe->bfe_chip.speed ||
810 duplex != bfe->bfe_chip.duplex ||
811 link != bfe->bfe_chip.link) {
812 return (1);
815 return (0);
818 static void
819 bfe_cam_write(bfe_t *bfe, uchar_t *d, int index)
821 uint32_t v;
823 v = ((uint32_t)d[2] << 24);
824 v |= ((uint32_t)d[3] << 16);
825 v |= ((uint32_t)d[4] << 8);
826 v |= (uint32_t)d[5];
828 OUTL(bfe, BFE_CAM_DATA_LO, v);
829 v = (BFE_CAM_HI_VALID |
830 (((uint32_t)d[0]) << 8) |
831 (((uint32_t)d[1])));
833 OUTL(bfe, BFE_CAM_DATA_HI, v);
834 OUTL(bfe, BFE_CAM_CTRL, (BFE_CAM_WRITE |
835 ((uint32_t)index << BFE_CAM_INDEX_SHIFT)));
836 (void) bfe_wait_bit(bfe, BFE_CAM_CTRL, BFE_CAM_BUSY, 10, 1);
840 * Chip related functions (halt, reset, start).
842 static void
843 bfe_chip_halt(bfe_t *bfe)
846 * Disables interrupts.
848 OUTL(bfe, BFE_INTR_MASK, 0);
849 FLUSH(bfe, BFE_INTR_MASK);
851 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE);
854 * Wait until TX and RX finish their job.
856 (void) bfe_wait_bit(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE, 20, 1);
859 * Disables DMA engine.
861 OUTL(bfe, BFE_DMARX_CTRL, 0);
862 OUTL(bfe, BFE_DMATX_CTRL, 0);
864 drv_usecwait(10);
866 bfe->bfe_chip_state = BFE_CHIP_HALT;
869 static void
870 bfe_chip_restart(bfe_t *bfe)
872 DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
873 int, bfe->bfe_chip_action);
876 * Halt chip and PHY.
878 bfe_chip_halt(bfe);
879 bfe_stop_phy(bfe);
880 bfe->bfe_chip_state = BFE_CHIP_STOPPED;
883 * Init variables.
885 bfe_init_vars(bfe);
888 * Reset chip and start PHY.
890 bfe_chip_reset(bfe);
893 * DMA descriptor rings.
895 bfe_tx_desc_init(&bfe->bfe_tx_ring);
896 bfe_rx_desc_init(&bfe->bfe_rx_ring);
898 bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
899 bfe_set_rx_mode(bfe);
900 bfe_enable_chip_intrs(bfe);
904 * Disables core by stopping the clock.
906 static void
907 bfe_core_disable(bfe_t *bfe)
909 if ((INL(bfe, BFE_SBTMSLOW) & BFE_RESET))
910 return;
912 OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
913 (void) bfe_wait_bit(bfe, BFE_SBTMSLOW, BFE_REJECT, 100, 0);
914 (void) bfe_wait_bit(bfe, BFE_SBTMSHIGH, BFE_BUSY, 100, 1);
915 OUTL(bfe, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT | BFE_RESET));
916 FLUSH(bfe, BFE_SBTMSLOW);
917 drv_usecwait(10);
918 OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
919 drv_usecwait(10);
923 * Resets core.
925 static void
926 bfe_core_reset(bfe_t *bfe)
928 uint32_t val;
931 * First disable the core.
933 bfe_core_disable(bfe);
935 OUTL(bfe, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
936 FLUSH(bfe, BFE_SBTMSLOW);
937 drv_usecwait(1);
939 if (INL(bfe, BFE_SBTMSHIGH) & BFE_SERR)
940 OUTL(bfe, BFE_SBTMSHIGH, 0);
942 val = INL(bfe, BFE_SBIMSTATE);
943 if (val & (BFE_IBE | BFE_TO))
944 OUTL(bfe, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));
946 OUTL(bfe, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
947 FLUSH(bfe, BFE_SBTMSLOW);
948 drv_usecwait(1);
950 OUTL(bfe, BFE_SBTMSLOW, BFE_CLOCK);
951 FLUSH(bfe, BFE_SBTMSLOW);
952 drv_usecwait(1);
955 static void
956 bfe_setup_config(bfe_t *bfe, uint32_t cores)
958 uint32_t bar_orig, val;
961 * Change bar0 window to map sbtopci registers.
963 bar_orig = pci_config_get32(bfe->bfe_conf_handle, BFE_BAR0_WIN);
964 pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, BFE_REG_PCI);
966 /* Just read it and don't do anything */
967 val = INL(bfe, BFE_SBIDHIGH) & BFE_IDH_CORE;
969 val = INL(bfe, BFE_SBINTVEC);
970 val |= cores;
971 OUTL(bfe, BFE_SBINTVEC, val);
973 val = INL(bfe, BFE_SSB_PCI_TRANS_2);
974 val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST;
975 OUTL(bfe, BFE_SSB_PCI_TRANS_2, val);
978 * Restore bar0 window mapping.
980 pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, bar_orig);
984 * Resets chip and starts PHY.
986 static void
987 bfe_chip_reset(bfe_t *bfe)
989 uint32_t val;
991 /* Set the interrupt vector for the enet core */
992 bfe_setup_config(bfe, BFE_INTVEC_ENET0);
994 /* check if core is up */
995 val = INL(bfe, BFE_SBTMSLOW) &
996 (BFE_RESET | BFE_REJECT | BFE_CLOCK);
998 if (val == BFE_CLOCK) {
999 OUTL(bfe, BFE_RCV_LAZY, 0);
1000 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE);
1001 (void) bfe_wait_bit(bfe, BFE_ENET_CTRL,
1002 BFE_ENET_DISABLE, 10, 1);
1003 OUTL(bfe, BFE_DMATX_CTRL, 0);
1004 FLUSH(bfe, BFE_DMARX_STAT);
1005 drv_usecwait(20000); /* 20 milli seconds */
1006 if (INL(bfe, BFE_DMARX_STAT) & BFE_STAT_EMASK) {
1007 (void) bfe_wait_bit(bfe, BFE_DMARX_STAT, BFE_STAT_SIDLE,
1008 10, 0);
1010 OUTL(bfe, BFE_DMARX_CTRL, 0);
1013 bfe_core_reset(bfe);
1014 bfe_clear_stats(bfe);
1016 OUTL(bfe, BFE_MDIO_CTRL, 0x8d);
1017 val = INL(bfe, BFE_DEVCTRL);
1018 if (!(val & BFE_IPP))
1019 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_EPSEL);
1020 else if (INL(bfe, BFE_DEVCTRL & BFE_EPR)) {
1021 OUTL_AND(bfe, BFE_DEVCTRL, ~BFE_EPR);
1022 drv_usecwait(20000); /* 20 milli seconds */
1025 OUTL_OR(bfe, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED);
1027 OUTL_AND(bfe, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN);
1029 OUTL(bfe, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) &
1030 BFE_LAZY_FC_MASK));
1032 OUTL_OR(bfe, BFE_RCV_LAZY, 0);
1034 OUTL(bfe, BFE_RXMAXLEN, bfe->bfe_rx_ring.r_buf_len);
1035 OUTL(bfe, BFE_TXMAXLEN, bfe->bfe_tx_ring.r_buf_len);
1037 OUTL(bfe, BFE_TX_WMARK, 56);
1039 /* Program DMA channels */
1040 OUTL(bfe, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);
1043 * DMA addresses need to be added to BFE_PCI_DMA
1045 OUTL(bfe, BFE_DMATX_ADDR,
1046 bfe->bfe_tx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);
1048 OUTL(bfe, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT)
1049 | BFE_RX_CTRL_ENABLE);
1051 OUTL(bfe, BFE_DMARX_ADDR,
1052 bfe->bfe_rx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);
1054 (void) bfe_startup_phy(bfe);
1056 bfe->bfe_chip_state = BFE_CHIP_INITIALIZED;
1060 * It enables interrupts. Should be the last step while starting chip.
1062 static void
1063 bfe_enable_chip_intrs(bfe_t *bfe)
1065 /* Enable the chip and core */
1066 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_ENABLE);
1068 /* Enable interrupts */
1069 OUTL(bfe, BFE_INTR_MASK, BFE_IMASK_DEF);
1073 * Common code to take care of setting RX side mode (filter).
1075 static void
1076 bfe_set_rx_mode(bfe_t *bfe)
1078 uint32_t val;
1079 int i;
1080 ether_addr_t mac[ETHERADDRL] = {0, 0, 0, 0, 0, 0};
1083 * We don't touch RX filter if we were asked to suspend. It's fine
1084 * if chip is not active (no interface is plumbed on us).
1086 if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED)
1087 return;
1089 val = INL(bfe, BFE_RXCONF);
1091 val &= ~BFE_RXCONF_PROMISC;
1092 val &= ~BFE_RXCONF_DBCAST;
1094 if ((bfe->bfe_chip_mode & BFE_RX_MODE_ENABLE) == 0) {
1095 OUTL(bfe, BFE_CAM_CTRL, 0);
1096 FLUSH(bfe, BFE_CAM_CTRL);
1097 } else if (bfe->bfe_chip_mode & BFE_RX_MODE_PROMISC) {
1098 val |= BFE_RXCONF_PROMISC;
1099 val &= ~BFE_RXCONF_DBCAST;
1100 } else {
1101 if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1102 /* Flush everything */
1103 OUTL(bfe, BFE_RXCONF, val |
1104 BFE_RXCONF_PROMISC | BFE_RXCONF_ALLMULTI);
1105 FLUSH(bfe, BFE_RXCONF);
1108 /* Disable CAM */
1109 OUTL(bfe, BFE_CAM_CTRL, 0);
1110 FLUSH(bfe, BFE_CAM_CTRL);
1113 * We receive all multicast packets.
1115 val |= BFE_RXCONF_ALLMULTI;
1117 for (i = 0; i < BFE_MAX_MULTICAST_TABLE - 1; i++) {
1118 bfe_cam_write(bfe, (uchar_t *)mac, i);
1121 bfe_cam_write(bfe, bfe->bfe_ether_addr, i);
1123 /* Enable CAM */
1124 OUTL_OR(bfe, BFE_CAM_CTRL, BFE_CAM_ENABLE);
1125 FLUSH(bfe, BFE_CAM_CTRL);
1128 DTRACE_PROBE2(rx__mode__filter, int, bfe->bfe_unit,
1129 int, val);
1131 OUTL(bfe, BFE_RXCONF, val);
1132 FLUSH(bfe, BFE_RXCONF);
1136 * Reset various variable values to initial state.
1138 static void
1139 bfe_init_vars(bfe_t *bfe)
1141 bfe->bfe_chip_mode = BFE_RX_MODE_ENABLE;
1143 /* Initial assumption */
1144 bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
1145 bfe->bfe_chip.speed = 0;
1146 bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
1148 bfe->bfe_periodic_id = NULL;
1149 bfe->bfe_chip_state = BFE_CHIP_UNINITIALIZED;
1151 bfe->bfe_tx_stall_time = 0;
1155 * Initializes TX side descriptor entries (bfe_desc_t). Each descriptor entry
1156 * has control (desc_ctl) and address (desc_addr) member.
1158 static void
1159 bfe_tx_desc_init(bfe_ring_t *r)
1161 int i;
1162 uint32_t v;
1164 for (i = 0; i < r->r_ndesc; i++) {
1165 PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
1166 (r->r_buf_dma[i].len & BFE_DESC_LEN));
1169 * DMA addresses need to be added to BFE_PCI_DMA
1171 PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
1172 (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));
1175 v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
1176 PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
1177 v | BFE_DESC_EOT);
1179 (void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
1181 r->r_curr_desc = 0;
1182 r->r_avail_desc = TX_NUM_DESC;
1183 r->r_cons_desc = 0;
1187 * Initializes RX side descriptor entries (bfe_desc_t). Each descriptor entry
1188 * has control (desc_ctl) and address (desc_addr) member.
1190 static void
1191 bfe_rx_desc_init(bfe_ring_t *r)
1193 int i;
1194 uint32_t v;
1196 for (i = 0; i < r->r_ndesc; i++) {
1197 PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
1198 (r->r_buf_dma[i].len& BFE_DESC_LEN));
1200 PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
1201 (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));
1203 /* Initialize rx header (len, flags) */
1204 bzero(r->r_buf_dma[i].addr, sizeof (bfe_rx_header_t));
1206 (void) SYNC_BUF(r, i, 0, sizeof (bfe_rx_header_t),
1207 DDI_DMA_SYNC_FORDEV);
1210 v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
1211 PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
1212 v | BFE_DESC_EOT);
1214 (void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
1216 /* TAIL of RX Descriptor */
1217 OUTL(r->r_bfe, BFE_DMARX_PTR, ((i) * sizeof (bfe_desc_t)));
1219 r->r_curr_desc = 0;
1220 r->r_avail_desc = RX_NUM_DESC;
1223 static int
1224 bfe_chip_start(bfe_t *bfe)
1226 ASSERT_ALL_LOCKS(bfe);
1229 * Stop the chip first & then Reset the chip. At last enable interrupts.
1231 bfe_chip_halt(bfe);
1232 bfe_stop_phy(bfe);
1235 * Reset chip and start PHY.
1237 bfe_chip_reset(bfe);
1240 * Initailize Descriptor Rings.
1242 bfe_tx_desc_init(&bfe->bfe_tx_ring);
1243 bfe_rx_desc_init(&bfe->bfe_rx_ring);
1245 bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
1246 bfe->bfe_chip_mode |= BFE_RX_MODE_ENABLE;
1247 bfe_set_rx_mode(bfe);
1248 bfe_enable_chip_intrs(bfe);
1250 /* Check link, speed and duplex mode */
1251 (void) bfe_check_link(bfe);
1253 return (DDI_SUCCESS);
1258 * Clear chip statistics.
1260 static void
1261 bfe_clear_stats(bfe_t *bfe)
1263 ulong_t r;
1265 OUTL(bfe, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
1268 * Stat registers are cleared by reading.
1270 for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4)
1271 (void) INL(bfe, r);
1273 for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4)
1274 (void) INL(bfe, r);
/*
 * Collect chip statistics.
 *
 * Reads the hardware MIB counters (which are clear-on-read, see
 * bfe_clear_stats()) and accumulates them into the driver's soft
 * statistics. Caller is expected to hold the driver locks; this is
 * invoked from bfe_mac_getstat() under bfe_rwlock.
 */
static void
bfe_gather_stats(bfe_t *bfe)
{
	ulong_t r;
	uint32_t *v;
	uint32_t txerr = 0, rxerr = 0, coll = 0;

	/*
	 * Walk the TX counter registers, accumulating each one into the
	 * successive uint32_t fields of bfe_hw_stats.
	 * NOTE(review): this assumes the bfe_hw_stats fields are declared
	 * in exactly the same order as the hardware registers (one
	 * uint32_t per 4-byte register) — confirm against bfe.h.
	 */
	v = &bfe->bfe_hw_stats.tx_good_octets;
	for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4) {
		*v += INL(bfe, r);
		v++;
	}

	/* Same pointer-walk for the RX counter registers. */
	v = &bfe->bfe_hw_stats.rx_good_octets;
	for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4) {
		*v += INL(bfe, r);
		v++;
	}

	/*
	 * TX :
	 * -------
	 * tx_good_octets, tx_good_pkts, tx_octets
	 * tx_pkts, tx_broadcast_pkts, tx_multicast_pkts
	 * tx_len_64, tx_len_65_to_127, tx_len_128_to_255
	 * tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max
	 * tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts
	 * tx_underruns, tx_total_cols, tx_single_cols
	 * tx_multiple_cols, tx_excessive_cols, tx_late_cols
	 * tx_defered, tx_carrier_lost, tx_pause_pkts
	 *
	 * RX :
	 * -------
	 * rx_good_octets, rx_good_pkts, rx_octets
	 * rx_pkts, rx_broadcast_pkts, rx_multicast_pkts
	 * rx_len_64, rx_len_65_to_127, rx_len_128_to_255
	 * rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max
	 * rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts
	 * rx_missed_pkts, rx_crc_align_errs, rx_undersize
	 * rx_crc_errs, rx_align_errs, rx_symbol_errs
	 * rx_pause_pkts, rx_nonpause_pkts
	 */

	bfe->bfe_stats.ether_stat_carrier_errors =
	    bfe->bfe_hw_stats.tx_carrier_lost;

	/* txerr += bfe->bfe_hw_stats.tx_carrier_lost; */

	bfe->bfe_stats.ether_stat_ex_collisions =
	    bfe->bfe_hw_stats.tx_excessive_cols;
	txerr += bfe->bfe_hw_stats.tx_excessive_cols;
	coll += bfe->bfe_hw_stats.tx_excessive_cols;

	bfe->bfe_stats.ether_stat_fcs_errors =
	    bfe->bfe_hw_stats.rx_crc_errs;
	rxerr += bfe->bfe_hw_stats.rx_crc_errs;

	bfe->bfe_stats.ether_stat_first_collisions =
	    bfe->bfe_hw_stats.tx_single_cols;
	coll += bfe->bfe_hw_stats.tx_single_cols;
	bfe->bfe_stats.ether_stat_multi_collisions =
	    bfe->bfe_hw_stats.tx_multiple_cols;
	coll += bfe->bfe_hw_stats.tx_multiple_cols;

	bfe->bfe_stats.ether_stat_toolong_errors =
	    bfe->bfe_hw_stats.rx_oversize_pkts;
	rxerr += bfe->bfe_hw_stats.rx_oversize_pkts;

	bfe->bfe_stats.ether_stat_tooshort_errors =
	    bfe->bfe_hw_stats.rx_undersize;
	rxerr += bfe->bfe_hw_stats.rx_undersize;

	bfe->bfe_stats.ether_stat_tx_late_collisions +=
	    bfe->bfe_hw_stats.tx_late_cols;

	bfe->bfe_stats.ether_stat_defer_xmts +=
	    bfe->bfe_hw_stats.tx_defered;

	/* Fold the per-category error counts into the aggregate stats. */
	bfe->bfe_stats.ether_stat_macrcv_errors += rxerr;
	bfe->bfe_stats.ether_stat_macxmt_errors += txerr;

	bfe->bfe_stats.collisions += coll;
}
1365 * Gets the state for dladm command and all.
1368 bfe_mac_getstat(void *arg, uint_t stat, uint64_t *val)
1370 bfe_t *bfe = (bfe_t *)arg;
1371 uint64_t v;
1372 int err = 0;
1374 rw_enter(&bfe->bfe_rwlock, RW_READER);
1377 switch (stat) {
1378 default:
1379 err = ENOTSUP;
1380 break;
1382 case MAC_STAT_IFSPEED:
1384 * MAC layer will ask for IFSPEED first and hence we
1385 * collect it only once.
1387 if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1389 * Update stats from the hardware.
1391 bfe_gather_stats(bfe);
1393 v = bfe->bfe_chip.speed;
1394 break;
1396 case ETHER_STAT_ADV_CAP_100T4:
1397 v = bfe->bfe_adv_100T4;
1398 break;
1400 case ETHER_STAT_ADV_CAP_100FDX:
1401 v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX_FD) != 0;
1402 break;
1404 case ETHER_STAT_ADV_CAP_100HDX:
1405 v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX) != 0;
1406 break;
1408 case ETHER_STAT_ADV_CAP_10FDX:
1409 v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T_FD) != 0;
1410 break;
1412 case ETHER_STAT_ADV_CAP_10HDX:
1413 v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T) != 0;
1414 break;
1416 case ETHER_STAT_ADV_CAP_ASMPAUSE:
1417 v = 0;
1418 break;
1420 case ETHER_STAT_ADV_CAP_AUTONEG:
1421 v = bfe->bfe_adv_aneg;
1422 break;
1424 case ETHER_STAT_ADV_CAP_PAUSE:
1425 v = (bfe->bfe_mii_anar & MII_ABILITY_PAUSE) != 0;
1426 break;
1428 case ETHER_STAT_ADV_REMFAULT:
1429 v = (bfe->bfe_mii_anar & MII_AN_ADVERT_REMFAULT) != 0;
1430 break;
1432 case ETHER_STAT_ALIGN_ERRORS:
1433 /* MIB */
1434 v = bfe->bfe_stats.ether_stat_align_errors;
1435 break;
1437 case ETHER_STAT_CAP_100T4:
1438 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASE_T4) != 0;
1439 break;
1441 case ETHER_STAT_CAP_100FDX:
1442 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX_FD) != 0;
1443 break;
1445 case ETHER_STAT_CAP_100HDX:
1446 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX) != 0;
1447 break;
1449 case ETHER_STAT_CAP_10FDX:
1450 v = (bfe->bfe_mii_bmsr & MII_STATUS_10_FD) != 0;
1451 break;
1453 case ETHER_STAT_CAP_10HDX:
1454 v = (bfe->bfe_mii_bmsr & MII_STATUS_10) != 0;
1455 break;
1457 case ETHER_STAT_CAP_ASMPAUSE:
1458 v = 0;
1459 break;
1461 case ETHER_STAT_CAP_AUTONEG:
1462 v = ((bfe->bfe_mii_bmsr & MII_STATUS_CANAUTONEG) != 0);
1463 break;
1465 case ETHER_STAT_CAP_PAUSE:
1466 v = 1;
1467 break;
1469 case ETHER_STAT_CAP_REMFAULT:
1470 v = (bfe->bfe_mii_bmsr & MII_STATUS_REMFAULT) != 0;
1471 break;
1473 case ETHER_STAT_CARRIER_ERRORS:
1474 v = bfe->bfe_stats.ether_stat_carrier_errors;
1475 break;
1477 case ETHER_STAT_JABBER_ERRORS:
1478 err = ENOTSUP;
1479 break;
1481 case ETHER_STAT_DEFER_XMTS:
1482 v = bfe->bfe_stats.ether_stat_defer_xmts;
1483 break;
1485 case ETHER_STAT_EX_COLLISIONS:
1486 /* MIB */
1487 v = bfe->bfe_stats.ether_stat_ex_collisions;
1488 break;
1490 case ETHER_STAT_FCS_ERRORS:
1491 /* MIB */
1492 v = bfe->bfe_stats.ether_stat_fcs_errors;
1493 break;
1495 case ETHER_STAT_FIRST_COLLISIONS:
1496 /* MIB */
1497 v = bfe->bfe_stats.ether_stat_first_collisions;
1498 break;
1500 case ETHER_STAT_LINK_ASMPAUSE:
1501 v = 0;
1502 break;
1504 case ETHER_STAT_LINK_AUTONEG:
1505 v = (bfe->bfe_mii_bmcr & MII_CONTROL_ANE) != 0 &&
1506 (bfe->bfe_mii_bmsr & MII_STATUS_ANDONE) != 0;
1507 break;
1509 case ETHER_STAT_LINK_DUPLEX:
1510 v = bfe->bfe_chip.duplex;
1511 break;
1513 case ETHER_STAT_LP_CAP_100T4:
1514 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_T4) != 0;
1515 break;
1517 case ETHER_STAT_LP_CAP_100FDX:
1518 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX_FD) != 0;
1519 break;
1521 case ETHER_STAT_LP_CAP_100HDX:
1522 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX) != 0;
1523 break;
1525 case ETHER_STAT_LP_CAP_10FDX:
1526 v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T_FD) != 0;
1527 break;
1529 case ETHER_STAT_LP_CAP_10HDX:
1530 v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T) != 0;
1531 break;
1533 case ETHER_STAT_LP_CAP_ASMPAUSE:
1534 v = 0;
1535 break;
1537 case ETHER_STAT_LP_CAP_AUTONEG:
1538 v = (bfe->bfe_mii_exp & MII_AN_EXP_LPCANAN) != 0;
1539 break;
1541 case ETHER_STAT_LP_CAP_PAUSE:
1542 v = (bfe->bfe_mii_anlpar & MII_ABILITY_PAUSE) != 0;
1543 break;
1545 case ETHER_STAT_LP_REMFAULT:
1546 v = (bfe->bfe_mii_anlpar & MII_STATUS_REMFAULT) != 0;
1547 break;
1549 case ETHER_STAT_MACRCV_ERRORS:
1550 v = bfe->bfe_stats.ether_stat_macrcv_errors;
1551 break;
1553 case ETHER_STAT_MACXMT_ERRORS:
1554 v = bfe->bfe_stats.ether_stat_macxmt_errors;
1555 break;
1557 case ETHER_STAT_MULTI_COLLISIONS:
1558 v = bfe->bfe_stats.ether_stat_multi_collisions;
1559 break;
1561 case ETHER_STAT_SQE_ERRORS:
1562 err = ENOTSUP;
1563 break;
1565 case ETHER_STAT_TOOLONG_ERRORS:
1566 v = bfe->bfe_stats.ether_stat_toolong_errors;
1567 break;
1569 case ETHER_STAT_TOOSHORT_ERRORS:
1570 v = bfe->bfe_stats.ether_stat_tooshort_errors;
1571 break;
1573 case ETHER_STAT_TX_LATE_COLLISIONS:
1574 v = bfe->bfe_stats.ether_stat_tx_late_collisions;
1575 break;
1577 case ETHER_STAT_XCVR_ADDR:
1578 v = bfe->bfe_phy_addr;
1579 break;
1581 case ETHER_STAT_XCVR_ID:
1582 v = bfe->bfe_phy_id;
1583 break;
1585 case MAC_STAT_BRDCSTRCV:
1586 v = bfe->bfe_stats.brdcstrcv;
1587 break;
1589 case MAC_STAT_BRDCSTXMT:
1590 v = bfe->bfe_stats.brdcstxmt;
1591 break;
1593 case MAC_STAT_MULTIXMT:
1594 v = bfe->bfe_stats.multixmt;
1595 break;
1597 case MAC_STAT_COLLISIONS:
1598 v = bfe->bfe_stats.collisions;
1599 break;
1601 case MAC_STAT_IERRORS:
1602 v = bfe->bfe_stats.ierrors;
1603 break;
1605 case MAC_STAT_IPACKETS:
1606 v = bfe->bfe_stats.ipackets;
1607 break;
1609 case MAC_STAT_MULTIRCV:
1610 v = bfe->bfe_stats.multircv;
1611 break;
1613 case MAC_STAT_NORCVBUF:
1614 v = bfe->bfe_stats.norcvbuf;
1615 break;
1617 case MAC_STAT_NOXMTBUF:
1618 v = bfe->bfe_stats.noxmtbuf;
1619 break;
1621 case MAC_STAT_OBYTES:
1622 v = bfe->bfe_stats.obytes;
1623 break;
1625 case MAC_STAT_OERRORS:
1626 /* MIB */
1627 v = bfe->bfe_stats.ether_stat_macxmt_errors;
1628 break;
1630 case MAC_STAT_OPACKETS:
1631 v = bfe->bfe_stats.opackets;
1632 break;
1634 case MAC_STAT_RBYTES:
1635 v = bfe->bfe_stats.rbytes;
1636 break;
1638 case MAC_STAT_UNDERFLOWS:
1639 v = bfe->bfe_stats.underflows;
1640 break;
1642 case MAC_STAT_OVERFLOWS:
1643 v = bfe->bfe_stats.overflows;
1644 break;
1647 rw_exit(&bfe->bfe_rwlock);
1649 *val = v;
1650 return (err);
/*
 * mc_getprop(9E) entry point: copies the current value of the requested
 * MAC property into 'val'. Returns 0 on success or ENOTSUP for
 * properties this driver does not implement.
 *
 * Note that the ADV_* and EN_* variants of each speed/duplex capability
 * deliberately report the same bfe_adv_* field.
 */
int
bfe_mac_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    void *val)
{
	bfe_t *bfe = (bfe_t *)arg;
	int err = 0;

	switch (num) {
	case MAC_PROP_DUPLEX:
		ASSERT(sz >= sizeof (link_duplex_t));
		bcopy(&bfe->bfe_chip.duplex, val, sizeof (link_duplex_t));
		break;

	case MAC_PROP_SPEED:
		ASSERT(sz >= sizeof (uint64_t));
		bcopy(&bfe->bfe_chip.speed, val, sizeof (uint64_t));
		break;

	case MAC_PROP_AUTONEG:
		*(uint8_t *)val = bfe->bfe_adv_aneg;
		break;

	case MAC_PROP_ADV_100FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100fdx;
		break;

	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100fdx;
		break;

	case MAC_PROP_ADV_100HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100hdx;
		break;

	case MAC_PROP_EN_100HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100hdx;
		break;

	case MAC_PROP_ADV_10FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10fdx;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10fdx;
		break;

	case MAC_PROP_ADV_10HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10hdx;
		break;

	case MAC_PROP_EN_10HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10hdx;
		break;

	case MAC_PROP_ADV_100T4_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100T4;
		break;

	case MAC_PROP_EN_100T4_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100T4;
		break;

	default:
		err = ENOTSUP;
	}

	return (err);
}
/*
 * mc_propinfo(9E) entry point: describes permissions and default values
 * of the driver's MAC properties.
 *
 * Duplex, speed and the ADV_* capabilities are read-only (they reflect
 * hardware/negotiated state); the EN_* capabilities and autoneg expose
 * the chip's capability flags as their defaults. Properties not listed
 * get the framework defaults.
 */
static void
bfe_mac_propinfo(void *arg, const char *name, mac_prop_id_t num,
    mac_prop_info_handle_t prh)
{
	bfe_t *bfe = (bfe_t *)arg;

	switch (num) {
	case MAC_PROP_DUPLEX:
	case MAC_PROP_SPEED:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_AUTONEG:
		mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_aneg);
		break;

	case MAC_PROP_EN_100FDX_CAP:
		mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_100fdx);
		break;

	case MAC_PROP_EN_100HDX_CAP:
		mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_100hdx);
		break;

	case MAC_PROP_EN_10FDX_CAP:
		mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_10fdx);
		break;

	case MAC_PROP_EN_10HDX_CAP:
		mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_10hdx);
		break;
	}
}
/*
 * mc_setprop(9E) entry point: sets a link property (advertised
 * capabilities or autoneg). If the new value differs from the current
 * one and the chip is active, the chip is restarted so the new
 * advertisement takes effect, and any stalled TX stream is kicked.
 *
 * Returns ENOTSUP for unknown properties or ones the hardware cannot
 * support (capability flag is 0); otherwise 0.
 */
/*ARGSUSED*/
int
bfe_mac_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    const void *val)
{
	bfe_t *bfe = (bfe_t *)arg;
	uint8_t *advp;	/* advertised-value field being set */
	uint8_t *capp;	/* matching hardware-capability field */
	int r = 0;	/* non-zero => chip was restarted */

	switch (num) {
	case MAC_PROP_EN_100FDX_CAP:
		advp = &bfe->bfe_adv_100fdx;
		capp = &bfe->bfe_cap_100fdx;
		break;

	case MAC_PROP_EN_100HDX_CAP:
		advp = &bfe->bfe_adv_100hdx;
		capp = &bfe->bfe_cap_100hdx;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		advp = &bfe->bfe_adv_10fdx;
		capp = &bfe->bfe_cap_10fdx;
		break;

	case MAC_PROP_EN_10HDX_CAP:
		advp = &bfe->bfe_adv_10hdx;
		capp = &bfe->bfe_cap_10hdx;
		break;

	case MAC_PROP_AUTONEG:
		advp = &bfe->bfe_adv_aneg;
		capp = &bfe->bfe_cap_aneg;
		break;

	default:
		return (ENOTSUP);
	}

	/* The hardware cannot do what is being asked for. */
	if (*capp == 0)
		return (ENOTSUP);

	bfe_grab_locks(bfe);

	if (*advp != *(const uint8_t *)val) {
		*advp = *(const uint8_t *)val;

		bfe->bfe_chip_action = BFE_ACTION_RESTART_SETPROP;
		if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
			/*
			 * We need to stop the timer before grabbing locks
			 * otherwise we can land-up in deadlock with untimeout.
			 */
			bfe_stop_timer(bfe);

			bfe->bfe_chip_action |= BFE_ACTION_RESTART;

			bfe_chip_restart(bfe);

			/*
			 * We leave SETPROP because properties can be
			 * temporary.
			 */
			bfe->bfe_chip_action &= ~(BFE_ACTION_RESTART);
			r = 1;
		}
	}

	bfe_release_locks(bfe);

	/* kick-off a potential stopped downstream */
	if (r)
		mac_tx_update(bfe->bfe_machdl);

	return (0);
}
1844 bfe_mac_set_ether_addr(void *arg, const uint8_t *ea)
1846 bfe_t *bfe = (bfe_t *)arg;
1848 bfe_grab_locks(bfe);
1849 bcopy(ea, bfe->bfe_ether_addr, ETHERADDRL);
1850 bfe_set_rx_mode(bfe);
1851 bfe_release_locks(bfe);
1852 return (0);
/*
 * mc_start(9E) entry point: brings the chip up via bfe_chip_start()
 * under the driver locks, then nudges the MAC layer in case TX was
 * previously flow-controlled. Returns EINVAL if the chip could not be
 * started.
 */
int
bfe_mac_start(void *arg)
{
	bfe_t *bfe = (bfe_t *)arg;

	bfe_grab_locks(bfe);
	if (bfe_chip_start(bfe) == DDI_FAILURE) {
		bfe_release_locks(bfe);
		return (EINVAL);
	}

	bfe_release_locks(bfe);

	mac_tx_update(bfe->bfe_machdl);

	return (0);
}
/*
 * mc_stop(9E) entry point: quiesces the chip.
 *
 * Order matters here: the watchdog timer is stopped before the locks
 * are taken (untimeout deadlock), then interrupts are disabled, the PHY
 * is stopped, the chip is reset (leaving the PHY running), and finally
 * the RX engine is disabled.
 */
void
bfe_mac_stop(void *arg)
{
	bfe_t *bfe = (bfe_t *)arg;

	/*
	 * We need to stop the timer before grabbing locks otherwise
	 * we can land-up in deadlock with untimeout.
	 */
	bfe_stop_timer(bfe);

	bfe_grab_locks(bfe);

	/*
	 * First halt the chip by disabling interrupts.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);

	bfe->bfe_chip_state = BFE_CHIP_STOPPED;

	/*
	 * This will leave the PHY running.
	 */
	bfe_chip_reset(bfe);

	/*
	 * Disable RX register.
	 */
	bfe->bfe_chip_mode &= ~BFE_RX_MODE_ENABLE;
	bfe_set_rx_mode(bfe);

	bfe_release_locks(bfe);
}
/*
 * Send a packet down the wire.
 *
 * Copies 'mp' into the pre-allocated DMA buffer of the current TX
 * descriptor slot, programs the descriptor, and rings the TX doorbell.
 * Must be called with the TX ring lock held.
 *
 * Returns BFE_SUCCESS when the packet was consumed (including the
 * oversize case, where it is silently dropped) and BFE_FAILURE when the
 * ring is full — the caller then requeues the mblk and waits for a TX
 * completion interrupt to reschedule.
 */
static int
bfe_send_a_packet(bfe_t *bfe, mblk_t *mp)
{
	bfe_ring_t *r = &bfe->bfe_tx_ring;
	uint32_t cur = r->r_curr_desc;
	uint32_t next;
	size_t pktlen = msgsize(mp);
	uchar_t *buf;
	uint32_t v;

	ASSERT(MUTEX_HELD(&r->r_lock));
	ASSERT(mp != NULL);

	/* Too big for a DMA buffer: drop the packet, report success. */
	if (pktlen > r->r_buf_len) {
		freemsg(mp);
		return (BFE_SUCCESS);
	}

	/*
	 * There is a big reason why we don't check for '0'. It becomes easy
	 * for us to not roll over the ring since we are based on producer (tx)
	 * and consumer (reclaim by an interrupt) model. Especially when we
	 * run out of TX descriptor, chip will send a single interrupt and
	 * both producer and consumer counter will be same. So we keep a
	 * difference of 1 always.
	 */
	if (r->r_avail_desc <= 1) {
		bfe->bfe_stats.noxmtbuf++;
		bfe->bfe_tx_resched = 1;
		return (BFE_FAILURE);
	}

	/*
	 * Get the DMA buffer to hold packet.
	 */
	buf = (uchar_t *)r->r_buf_dma[cur].addr;

	mcopymsg(mp, buf);	/* it also frees mp */

	/*
	 * Gather statistics.
	 */
	if (buf[0] & 0x1) {
		/* Destination has the multicast/broadcast bit set. */
		if (bcmp(buf, bfe_broadcast, ETHERADDRL) != 0)
			bfe->bfe_stats.multixmt++;
		else
			bfe->bfe_stats.brdcstxmt++;
	}
	bfe->bfe_stats.opackets++;
	bfe->bfe_stats.obytes += pktlen;

	/*
	 * Program the DMA descriptor (start and end of frame are same).
	 */
	next = cur;
	v = (pktlen & BFE_DESC_LEN) | BFE_DESC_IOC | BFE_DESC_SOF |
	    BFE_DESC_EOF;

	/* Last slot in the ring gets the end-of-table flag. */
	if (cur == (TX_NUM_DESC - 1))
		v |= BFE_DESC_EOT;

	PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_ctl), v);

	/*
	 * DMA addresses need to be added to BFE_PCI_DMA
	 */
	PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_addr),
	    (r->r_buf_dma[cur].cookie.dmac_laddress + BFE_PCI_DMA));

	/*
	 * Sync the packet data for the device.
	 */
	(void) SYNC_BUF(r, cur, 0, pktlen, DDI_DMA_SYNC_FORDEV);

	/* Move to next descriptor slot */
	BFE_INC_SLOT(next, TX_NUM_DESC);

	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);

	r->r_curr_desc = next;

	/*
	 * The order should be 1,2,3,... for BFE_DMATX_PTR if 0,1,2,3,...
	 * descriptor slot are being programmed.
	 */
	OUTL(bfe, BFE_DMATX_PTR, next * sizeof (bfe_desc_t));
	FLUSH(bfe, BFE_DMATX_PTR);

	r->r_avail_desc--;

	/*
	 * Let timeout know that it must reset the chip if a
	 * packet is not sent down the wire for more than 5 seconds.
	 */
	bfe->bfe_tx_stall_time = gethrtime() + (5 * 1000000000ULL);

	return (BFE_SUCCESS);
}
/*
 * mc_tx(9E) entry point: walks the mblk chain, handing each packet to
 * bfe_send_a_packet() under the TX ring lock.
 *
 * If the chip is not active the whole chain is dropped. If the ring
 * fills up mid-chain, the unsent remainder (with its b_next links
 * restored) is returned to the MAC layer for later retransmission;
 * NULL means everything was consumed.
 */
mblk_t *
bfe_mac_transmit_packet(void *arg, mblk_t *mp)
{
	bfe_t *bfe = (bfe_t *)arg;
	bfe_ring_t *r = &bfe->bfe_tx_ring;
	mblk_t *nmp;

	mutex_enter(&r->r_lock);

	if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
		DTRACE_PROBE1(tx__chip__not__active, int, bfe->bfe_unit);

		freemsgchain(mp);
		mutex_exit(&r->r_lock);
		return (NULL);
	}

	while (mp != NULL) {
		/* Detach the head packet from the chain before sending. */
		nmp = mp->b_next;
		mp->b_next = NULL;

		if (bfe_send_a_packet(bfe, mp) == BFE_FAILURE) {
			/* Ring full: re-link and return the remainder. */
			mp->b_next = nmp;
			break;
		}
		mp = nmp;
	}

	mutex_exit(&r->r_lock);

	return (mp);
}
2046 bfe_mac_set_promisc(void *arg, boolean_t promiscflag)
2048 bfe_t *bfe = (bfe_t *)arg;
2050 bfe_grab_locks(bfe);
2051 if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2052 bfe_release_locks(bfe);
2053 return (EIO);
2056 if (promiscflag) {
2057 /* Set Promiscous on */
2058 bfe->bfe_chip_mode |= BFE_RX_MODE_PROMISC;
2059 } else {
2060 bfe->bfe_chip_mode &= ~BFE_RX_MODE_PROMISC;
2063 bfe_set_rx_mode(bfe);
2064 bfe_release_locks(bfe);
2066 return (0);
2070 bfe_mac_set_multicast(void *arg, boolean_t add, const uint8_t *macaddr)
2073 * It was too much of pain to implement multicast in CAM. Instead
2074 * we never disable multicast filter.
2076 return (0);
/*
 * mac_callbacks(9S) vector registered with the MAC layer. The first
 * word advertises which optional entry points (setprop/getprop/
 * propinfo) are present; unsupported slots are NULL.
 */
static mac_callbacks_t bfe_mac_callbacks = {
	MC_SETPROP | MC_GETPROP | MC_PROPINFO,	/* optional callbacks mask */
	bfe_mac_getstat,	/* gets stats */
	bfe_mac_start,		/* starts mac */
	bfe_mac_stop,		/* stops mac */
	bfe_mac_set_promisc,	/* sets promisc mode for snoop */
	bfe_mac_set_multicast,	/* multicast implementation */
	bfe_mac_set_ether_addr,	/* sets ethernet address (unicast) */
	bfe_mac_transmit_packet, /* transmits packet */
	NULL,			/* reserved */
	NULL,			/* ioctl */
	NULL,			/* getcap */
	NULL,			/* open */
	NULL,			/* close */
	bfe_mac_setprop,	/* sets MAC property */
	bfe_mac_getprop,	/* gets MAC property */
	bfe_mac_propinfo	/* describes MAC property */
};
/*
 * Classifies error interrupt bits, updates error statistics, marks the
 * required recovery action in bfe_chip_action, and halts the chip so
 * the watchdog/restart path can recover it.
 *
 * Only a TX FIFO underflow (BFE_ISTAT_TFU) is considered recoverable
 * without a restart — it returns early without halting the chip.
 */
static void
bfe_error_handler(bfe_t *bfe, int intr_mask)
{
	uint32_t v;

	/* RX FIFO overflow: request a fault restart. */
	if (intr_mask & BFE_ISTAT_RFO) {
		bfe->bfe_stats.overflows++;
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	/* TX FIFO underflow: count it but leave the chip running. */
	if (intr_mask & BFE_ISTAT_TFU) {
		bfe->bfe_stats.underflows++;
		return;
	}

	/* Descriptor Protocol Error */
	if (intr_mask & BFE_ISTAT_DPE) {
		bfe_error(bfe->bfe_dip,
		    "Descriptor Protocol Error. Halting Chip");
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	/* Descriptor Error */
	if (intr_mask & BFE_ISTAT_DSCE) {
		/*
		 * NOTE(review): unlike the other branches this one jumps to
		 * 'action' without setting any BFE_ACTION_* flags, so only
		 * the halt happens here — confirm this is intentional.
		 */
		bfe_error(bfe->bfe_dip, "Descriptor Error. Restarting Chip");
		goto action;
	}

	/* Receive Descr. Underflow */
	if (intr_mask & BFE_ISTAT_RDU) {
		bfe_error(bfe->bfe_dip,
		    "Receive Descriptor Underflow. Restarting Chip");
		bfe->bfe_stats.ether_stat_macrcv_errors++;
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	v = INL(bfe, BFE_DMATX_STAT);

	/* Error while sending a packet */
	if (v & BFE_STAT_EMASK) {
		bfe->bfe_stats.ether_stat_macxmt_errors++;
		bfe_error(bfe->bfe_dip,
		    "Error while sending a packet. Restarting Chip");
	}

	/* Error while receiving a packet */
	v = INL(bfe, BFE_DMARX_STAT);
	if (v & BFE_RX_FLAG_ERRORS) {
		bfe->bfe_stats.ierrors++;
		bfe_error(bfe->bfe_dip,
		    "Error while receiving a packet. Restarting Chip");
	}

	/* Generic DMA error path: always request a fault restart. */
	bfe->bfe_chip_action |=
	    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);

action:
	bfe_chip_halt(bfe);
}
/*
 * It will recycle a RX descriptor slot.
 *
 * Zeroes the slot's hardware RX header, syncs it to the device, and
 * reprograms the descriptor control/address words so the chip can fill
 * the buffer again. The slot index is taken modulo RX_NUM_DESC so
 * callers may pass an unwrapped index.
 */
static void
bfe_rx_desc_buf_reinit(bfe_t *bfe, uint_t slot)
{
	bfe_ring_t *r = &bfe->bfe_rx_ring;
	uint32_t v;

	slot %= RX_NUM_DESC;

	/* Clear the chip-written RX header at the front of the buffer. */
	bzero(r->r_buf_dma[slot].addr, sizeof (bfe_rx_header_t));

	(void) SYNC_BUF(r, slot, 0, BFE_RX_OFFSET, DDI_DMA_SYNC_FORDEV);

	v = r->r_buf_dma[slot].len & BFE_DESC_LEN;
	/* Last slot in the ring gets the end-of-table flag. */
	if (slot == (RX_NUM_DESC - 1))
		v |= BFE_DESC_EOT;

	PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_ctl), v);

	/*
	 * DMA addresses need to be added to BFE_PCI_DMA
	 */
	PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_addr),
	    (r->r_buf_dma[slot].cookie.dmac_laddress + BFE_PCI_DMA));
}
/*
 * Gets called from interrupt context to handle RX interrupt.
 *
 * Walks the RX ring from the driver's current descriptor up to the
 * chip's current descriptor (read from BFE_DMARX_STAT), copying each
 * received frame into a freshly allocated mblk and recycling the ring
 * slot. Returns the chain of received mblks (NULL if none) for the
 * caller to pass to mac_rx().
 */
static mblk_t *
bfe_receive(bfe_t *bfe, int intr_mask)
{
	int rxstat, current;
	mblk_t *mp = NULL, *rx_head, *rx_tail;
	uchar_t *rx_header;
	uint16_t len;
	uchar_t *bp;
	bfe_ring_t *r = &bfe->bfe_rx_ring;
	int i;

	/* Where the chip's RX DMA engine currently is. */
	rxstat = INL(bfe, BFE_DMARX_STAT);
	current = (rxstat & BFE_STAT_CDMASK) / sizeof (bfe_desc_t);
	i = r->r_curr_desc;

	rx_head = rx_tail = NULL;

	DTRACE_PROBE3(receive, int, bfe->bfe_unit,
	    int, r->r_curr_desc,
	    int, current);

	for (i = r->r_curr_desc; i != current;
	    BFE_INC_SLOT(i, RX_NUM_DESC)) {

		/*
		 * Sync the buffer associated with the descriptor table entry.
		 */
		(void) SYNC_BUF(r, i, 0, r->r_buf_dma[i].len,
		    DDI_DMA_SYNC_FORKERNEL);

		rx_header = (void *)r->r_buf_dma[i].addr;

		/*
		 * We do this to make sure we are endian neutral. Chip is
		 * big endian.
		 *
		 * The header looks like :-
		 *
		 *  Offset 0  -> uint16_t len
		 *  Offset 2  -> uint16_t flags
		 *  Offset 4  -> uint16_t pad[12]
		 */
		len = (rx_header[1] << 8) | rx_header[0];
		len -= 4;	/* CRC bytes need to be removed */

		/*
		 * Don't receive this packet if pkt length is greater than
		 * MTU + VLAN_TAGSZ.
		 *
		 * NOTE(review): if the chip ever reports len < 4 the
		 * subtraction above wraps (uint16_t), but the wrapped value
		 * is then caught by this bound check and the slot recycled —
		 * confirm that is the intended handling.
		 */
		if (len > r->r_buf_len) {
			/* Recycle slot for later use */
			bfe_rx_desc_buf_reinit(bfe, i);
			continue;
		}

		if ((mp = allocb(len + VLAN_TAGSZ, BPRI_MED)) != NULL) {
			/* Reserve room for a VLAN tag insertion upstream. */
			mp->b_rptr += VLAN_TAGSZ;
			bp = mp->b_rptr;
			mp->b_wptr = bp + len;

			/* sizeof (bfe_rx_header_t) + 2 */
			bcopy(r->r_buf_dma[i].addr +
			    BFE_RX_OFFSET, bp, len);

			/* Append to the chain being built for mac_rx(). */
			mp->b_next = NULL;
			if (rx_tail == NULL)
				rx_head = rx_tail = mp;
			else {
				rx_tail->b_next = mp;
				rx_tail = mp;
			}

			/* Number of packets received so far */
			bfe->bfe_stats.ipackets++;

			/* Total bytes of packets received so far */
			bfe->bfe_stats.rbytes += len;

			if (bcmp(mp->b_rptr, bfe_broadcast, ETHERADDRL) == 0)
				bfe->bfe_stats.brdcstrcv++;
			else
				bfe->bfe_stats.multircv++;
		} else {
			bfe->bfe_stats.norcvbuf++;
			/* Recycle the slot for later use */
			bfe_rx_desc_buf_reinit(bfe, i);
			break;
		}

		/*
		 * Reinitialize the current descriptor slot's buffer so that
		 * it can be reused.
		 */
		bfe_rx_desc_buf_reinit(bfe, i);
	}

	r->r_curr_desc = i;

	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);

	return (rx_head);
}
/*
 * Reclaims TX descriptors consumed by the chip.
 *
 * Reads the chip's TX consumer index from BFE_DMATX_STAT and, for every
 * slot between the driver's consumer pointer and the chip's, bumps the
 * available-descriptor count and reprograms the descriptor for reuse.
 * Clears the TX stall deadline. Caller must hold the TX ring lock.
 *
 * Returns the chip's consumer index (currently ignored by callers).
 */
static int
bfe_tx_reclaim(bfe_ring_t *r)
{
	uint32_t cur, start;
	uint32_t v;

	cur = INL(r->r_bfe, BFE_DMATX_STAT) & BFE_STAT_CDMASK;
	cur = cur / sizeof (bfe_desc_t);

	/*
	 * Start with the last descriptor consumed by the chip.
	 */
	start = r->r_cons_desc;

	DTRACE_PROBE3(tx__reclaim, int, r->r_bfe->bfe_unit,
	    int, start,
	    int, cur);

	/*
	 * There will be at least one descriptor to process.
	 */
	while (start != cur) {
		r->r_avail_desc++;
		v = r->r_buf_dma[start].len & BFE_DESC_LEN;
		/* Last slot in the ring gets the end-of-table flag. */
		if (start == (TX_NUM_DESC - 1))
			v |= BFE_DESC_EOT;

		PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_ctl), v);
		PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_addr),
		    (r->r_buf_dma[start].cookie.dmac_laddress + BFE_PCI_DMA));

		/* Move to next descriptor in TX ring */
		BFE_INC_SLOT(start, TX_NUM_DESC);
	}

	(void) ddi_dma_sync(r->r_desc_dma_handle,
	    0, (r->r_ndesc * sizeof (bfe_desc_t)),
	    DDI_DMA_SYNC_FORDEV);

	r->r_cons_desc = start;	/* consumed pointer */
	r->r_bfe->bfe_tx_stall_time = 0;

	return (cur);
}
2344 static int
2345 bfe_tx_done(bfe_t *bfe, int intr_mask)
2347 bfe_ring_t *r = &bfe->bfe_tx_ring;
2348 int resched = 0;
2350 mutex_enter(&r->r_lock);
2351 (void) bfe_tx_reclaim(r);
2353 if (bfe->bfe_tx_resched) {
2354 resched = 1;
2355 bfe->bfe_tx_resched = 0;
2357 mutex_exit(&r->r_lock);
2359 return (resched);
/*
 * ISR for interrupt handling
 *
 * Acknowledges and dispatches interrupt causes: RX packets are
 * collected and handed to mac_rx() after the lock is dropped, TX
 * completions may trigger mac_tx_update(), and error bits go to
 * bfe_error_handler(). The driver rwlock is held as READER so the
 * chip cannot be stopped mid-interrupt.
 */
static uint_t
bfe_interrupt(caddr_t arg1, caddr_t arg2)
{
	bfe_t *bfe = (void *)arg1;
	uint32_t intr_stat;
	mblk_t *rx_head = NULL;
	int resched = 0;

	/*
	 * Grab the lock to avoid stopping the chip while this interrupt
	 * is handled.
	 */
	rw_enter(&bfe->bfe_rwlock, RW_READER);

	/*
	 * It's necessary to read intr stat again because masking interrupt
	 * register does not really mask interrupts coming from the chip.
	 */
	intr_stat = INL(bfe, BFE_INTR_STAT);
	intr_stat &= BFE_IMASK_DEF;
	/* Ack the causes; the read-back flushes the write. */
	OUTL(bfe, BFE_INTR_STAT, intr_stat);
	(void) INL(bfe, BFE_INTR_STAT);

	if (intr_stat == 0) {
		rw_exit(&bfe->bfe_rwlock);
		return (DDI_INTR_UNCLAIMED);
	}

	DTRACE_PROBE2(bfe__interrupt, int, bfe->bfe_unit,
	    int, intr_stat);

	if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
		/*
		 * If chip is suspended then we just return.
		 */
		if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED) {
			rw_exit(&bfe->bfe_rwlock);
			DTRACE_PROBE1(interrupt__chip__is__suspend, int,
			    bfe->bfe_unit);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Halt the chip again i.e basically disable interrupts.
		 */
		bfe_chip_halt(bfe);
		rw_exit(&bfe->bfe_rwlock);
		DTRACE_PROBE1(interrupt__chip__not__active, int,
		    bfe->bfe_unit);
		return (DDI_INTR_CLAIMED);
	}

	/* A packet was received */
	if (intr_stat & BFE_ISTAT_RX) {
		rx_head = bfe_receive(bfe, intr_stat);
	}

	/* A packet was sent down the wire */
	if (intr_stat & BFE_ISTAT_TX) {
		resched = bfe_tx_done(bfe, intr_stat);
	}

	/* There was an error */
	if (intr_stat & BFE_ISTAT_ERRORS) {
		bfe_error_handler(bfe, intr_stat);
	}

	rw_exit(&bfe->bfe_rwlock);

	/*
	 * Pass the list of packets received from chip to MAC layer.
	 */
	if (rx_head) {
		mac_rx(bfe->bfe_machdl, 0, rx_head);
	}

	/*
	 * Let the MAC start sending pkts to a potential stopped stream.
	 */
	if (resched)
		mac_tx_update(bfe->bfe_machdl);

	return (DDI_INTR_CLAIMED);
}
/*
 * Removes registered interrupt handler.
 *
 * Detaches the ISR and then frees the interrupt handle allocated in
 * bfe_add_intr().
 */
static void
bfe_remove_intr(bfe_t *bfe)
{
	(void) ddi_intr_remove_handler(bfe->bfe_intrhdl);
	(void) ddi_intr_free(bfe->bfe_intrhdl);
}
/*
 * Add an interrupt for the driver.
 *
 * Allocates a single fixed (legacy) interrupt, attaches bfe_interrupt()
 * as the handler, and records the interrupt priority for later mutex
 * initialization. Unwinds fully on any failure.
 */
static int
bfe_add_intr(bfe_t *bfe)
{
	int nintrs = 1;
	int ret;

	ret = ddi_intr_alloc(bfe->bfe_dip, &bfe->bfe_intrhdl,
	    DDI_INTR_TYPE_FIXED,	/* type */
	    0,	/* inumber */
	    1,	/* count */
	    &nintrs,	/* actual nintrs */
	    DDI_INTR_ALLOC_STRICT);

	if (ret != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, "ddi_intr_alloc() failed"
		    " : ret : %d", ret);
		return (DDI_FAILURE);
	}

	ret = ddi_intr_add_handler(bfe->bfe_intrhdl, bfe_interrupt, bfe, NULL);
	if (ret != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, "ddi_intr_add_handler() failed");
		(void) ddi_intr_free(bfe->bfe_intrhdl);
		return (DDI_FAILURE);
	}

	ret = ddi_intr_get_pri(bfe->bfe_intrhdl, &bfe->bfe_intrpri);
	if (ret != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, "ddi_intr_get_pri() failed");
		bfe_remove_intr(bfe);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
/*
 * Identify chipset family.
 *
 * Looks up the PCI vendor/device ID pair in the bfe_cards table.
 * Deliberately returns BFE_SUCCESS even when the card is unknown —
 * attach proceeds best-effort after logging a warning.
 */
static int
bfe_identify_hardware(bfe_t *bfe)
{
	uint16_t vid, did;
	int i;

	vid = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_VENID);
	did = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_DEVID);

	for (i = 0; i < (sizeof (bfe_cards) / sizeof (bfe_cards_t)); i++) {
		if (bfe_cards[i].vendor_id == vid &&
		    bfe_cards[i].device_id == did) {
			return (BFE_SUCCESS);
		}
	}

	bfe_error(bfe->bfe_dip, "bfe driver is attaching to unknown pci%d,%d"
	    " vendor/device-id card", vid, did);

	return (BFE_SUCCESS);
}
2526 * Maps device registers.
2528 static int
2529 bfe_regs_map(bfe_t *bfe)
2531 dev_info_t *dip = bfe->bfe_dip;
2532 int ret;
2534 ret = ddi_regs_map_setup(dip, 1, &bfe->bfe_mem_regset.addr, 0, 0,
2535 &bfe_dev_attr, &bfe->bfe_mem_regset.hdl);
2537 if (ret != DDI_SUCCESS) {
2538 bfe_error(bfe->bfe_dip, "ddi_regs_map_setup failed");
2539 return (DDI_FAILURE);
2542 return (DDI_SUCCESS);
/*
 * Undoes bfe_regs_map(): releases the register mapping.
 */
static void
bfe_unmap_regs(bfe_t *bfe)
{
	ddi_regs_map_free(&bfe->bfe_mem_regset.hdl);
}
/*
 * Reads the factory MAC address out of the chip's EEPROM shadow and
 * stores it as both the device (factory) and current ethernet address.
 *
 * The EEPROM bytes are read pairwise-swapped (79/78, 81/80, 83/82),
 * i.e. each 16-bit word is stored byte-swapped in EEPROM space.
 * The PHY address is marked unknown (-1) for later probing.
 */
static int
bfe_get_chip_config(bfe_t *bfe)
{
	bfe->bfe_dev_addr[0] = bfe->bfe_ether_addr[0] =
	    INB(bfe, BFE_EEPROM_BASE + 79);

	bfe->bfe_dev_addr[1] = bfe->bfe_ether_addr[1] =
	    INB(bfe, BFE_EEPROM_BASE + 78);

	bfe->bfe_dev_addr[2] = bfe->bfe_ether_addr[2] =
	    INB(bfe, BFE_EEPROM_BASE + 81);

	bfe->bfe_dev_addr[3] = bfe->bfe_ether_addr[3] =
	    INB(bfe, BFE_EEPROM_BASE + 80);

	bfe->bfe_dev_addr[4] = bfe->bfe_ether_addr[4] =
	    INB(bfe, BFE_EEPROM_BASE + 83);

	bfe->bfe_dev_addr[5] = bfe->bfe_ether_addr[5] =
	    INB(bfe, BFE_EEPROM_BASE + 82);

	/* PHY address is discovered later during PHY probe. */
	bfe->bfe_phy_addr = -1;

	return (DDI_SUCCESS);
}
/*
 * Ring Management routines
 */

/*
 * Allocates and DMA-binds one packet buffer for ring slot 'slot'.
 * 'd' is the intended DMA direction (DDI_DMA_READ/WRITE); the binding
 * itself is RDWR. The buffer must be covered by exactly one DMA cookie.
 *
 * Uses the classic goto-unwind pattern: each failure label releases
 * everything acquired before it. Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
bfe_ring_buf_alloc(bfe_t *bfe, bfe_ring_t *r, int slot, int d)
{
	int err;
	uint_t count = 0;

	err = ddi_dma_alloc_handle(bfe->bfe_dip,
	    &bfe_dma_attr_buf, DDI_DMA_SLEEP, NULL,
	    &r->r_buf_dma[slot].handle);

	if (err != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " alloc_handle failed");
		goto fail0;
	}

	err = ddi_dma_mem_alloc(r->r_buf_dma[slot].handle,
	    r->r_buf_len, &bfe_buf_attr, DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, NULL, &r->r_buf_dma[slot].addr,
	    &r->r_buf_dma[slot].len,
	    &r->r_buf_dma[slot].acchdl);

	if (err != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " mem_alloc failed :%d", err);
		goto fail1;
	}

	err = ddi_dma_addr_bind_handle(r->r_buf_dma[slot].handle,
	    NULL, r->r_buf_dma[slot].addr,
	    r->r_buf_dma[slot].len,
	    (DDI_DMA_RDWR | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL,
	    &r->r_buf_dma[slot].cookie,
	    &count);

	if (err != DDI_DMA_MAPPED) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " bind_handle failed");
		goto fail2;
	}

	/* The hardware descriptor can only take a single DMA address. */
	if (count > 1) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " more than one DMA cookie");
		(void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
		goto fail2;
	}

	return (DDI_SUCCESS);
fail2:
	ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
fail1:
	ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
fail0:
	return (DDI_FAILURE);
}
/*
 * Frees the DMA buffer of one ring slot (unbind, memory, handle) —
 * the inverse of bfe_ring_buf_alloc(). A no-op if the ring's buffer
 * array was never allocated or has already been torn down.
 */
static void
bfe_ring_buf_free(bfe_ring_t *r, int slot)
{
	if (r->r_buf_dma == NULL)
		return;

	(void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
	ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
	ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
}
2650 static void
2651 bfe_buffer_free(bfe_ring_t *r)
2653 int i;
2655 for (i = 0; i < r->r_ndesc; i++) {
2656 bfe_ring_buf_free(r, i);
/*
 * Frees the ring's descriptor table DMA resources and the soft-state
 * buffer-tracking array allocated in bfe_ring_desc_alloc(). The
 * per-slot buffers themselves must already have been freed (see
 * bfe_buffer_free()). Pointers are NULLed so a later
 * bfe_ring_buf_free() becomes a safe no-op.
 */
static void
bfe_ring_desc_free(bfe_ring_t *r)
{
	(void) ddi_dma_unbind_handle(r->r_desc_dma_handle);
	ddi_dma_mem_free(&r->r_desc_acc_handle);
	ddi_dma_free_handle(&r->r_desc_dma_handle);
	kmem_free(r->r_buf_dma, r->r_ndesc * sizeof (bfe_dma_t));

	r->r_buf_dma = NULL;
	r->r_desc = NULL;
}
2673 static int
2674 bfe_ring_desc_alloc(bfe_t *bfe, bfe_ring_t *r, int d)
2676 int err, i, fail = 0;
2677 caddr_t ring;
2678 size_t size_krnl = 0, size_dma = 0, ring_len = 0;
2679 ddi_dma_cookie_t cookie;
2680 uint_t count = 0;
2682 ASSERT(bfe != NULL);
2684 size_krnl = r->r_ndesc * sizeof (bfe_dma_t);
2685 size_dma = r->r_ndesc * sizeof (bfe_desc_t);
2686 r->r_buf_dma = kmem_zalloc(size_krnl, KM_SLEEP);
2689 err = ddi_dma_alloc_handle(bfe->bfe_dip, &bfe_dma_attr_desc,
2690 DDI_DMA_SLEEP, NULL, &r->r_desc_dma_handle);
2692 if (err != DDI_SUCCESS) {
2693 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2694 " ddi_dma_alloc_handle()");
2695 kmem_free(r->r_buf_dma, size_krnl);
2696 return (DDI_FAILURE);
2700 err = ddi_dma_mem_alloc(r->r_desc_dma_handle,
2701 size_dma, &bfe_buf_attr,
2702 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2703 &ring, &ring_len, &r->r_desc_acc_handle);
2705 if (err != DDI_SUCCESS) {
2706 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2707 " ddi_dma_mem_alloc()");
2708 ddi_dma_free_handle(&r->r_desc_dma_handle);
2709 kmem_free(r->r_buf_dma, size_krnl);
2710 return (DDI_FAILURE);
2713 err = ddi_dma_addr_bind_handle(r->r_desc_dma_handle,
2714 NULL, ring, ring_len,
2715 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2716 DDI_DMA_SLEEP, NULL,
2717 &cookie, &count);
2719 if (err != DDI_SUCCESS) {
2720 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2721 " ddi_dma_addr_bind_handle()");
2722 ddi_dma_mem_free(&r->r_desc_acc_handle);
2723 ddi_dma_free_handle(&r->r_desc_dma_handle);
2724 kmem_free(r->r_buf_dma, size_krnl);
2725 return (DDI_FAILURE);
2729 * We don't want to have multiple cookies. Descriptor should be
2730 * aligned to PAGESIZE boundary.
2732 ASSERT(count == 1);
2734 /* The actual descriptor for the ring */
2735 r->r_desc_len = ring_len;
2736 r->r_desc_cookie = cookie;
2738 r->r_desc = (void *)ring;
2740 bzero(r->r_desc, size_dma);
2741 bzero(r->r_desc, ring_len);
2743 /* For each descriptor, allocate a DMA buffer */
2744 fail = 0;
2745 for (i = 0; i < r->r_ndesc; i++) {
2746 if (bfe_ring_buf_alloc(bfe, r, i, d) != DDI_SUCCESS) {
2747 i--;
2748 fail = 1;
2749 break;
2753 if (fail) {
2754 while (i-- >= 0) {
2755 bfe_ring_buf_free(r, i);
2758 /* We don't need the descriptor anymore */
2759 bfe_ring_desc_free(r);
2760 return (DDI_FAILURE);
2763 return (DDI_SUCCESS);
2766 static int
2767 bfe_rings_alloc(bfe_t *bfe)
2769 /* TX */
2770 mutex_init(&bfe->bfe_tx_ring.r_lock, NULL, MUTEX_DRIVER, NULL);
2771 bfe->bfe_tx_ring.r_lockp = &bfe->bfe_tx_ring.r_lock;
2772 bfe->bfe_tx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) +
2773 VLAN_TAGSZ + ETHERFCSL;
2774 bfe->bfe_tx_ring.r_ndesc = TX_NUM_DESC;
2775 bfe->bfe_tx_ring.r_bfe = bfe;
2776 bfe->bfe_tx_ring.r_avail_desc = TX_NUM_DESC;
2778 /* RX */
2779 mutex_init(&bfe->bfe_rx_ring.r_lock, NULL, MUTEX_DRIVER, NULL);
2780 bfe->bfe_rx_ring.r_lockp = &bfe->bfe_rx_ring.r_lock;
2781 bfe->bfe_rx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) +
2782 VLAN_TAGSZ + ETHERFCSL + RX_HEAD_ROOM;
2783 bfe->bfe_rx_ring.r_ndesc = RX_NUM_DESC;
2784 bfe->bfe_rx_ring.r_bfe = bfe;
2785 bfe->bfe_rx_ring.r_avail_desc = RX_NUM_DESC;
2787 /* Allocate TX Ring */
2788 if (bfe_ring_desc_alloc(bfe, &bfe->bfe_tx_ring,
2789 DDI_DMA_WRITE) != DDI_SUCCESS)
2790 return (DDI_FAILURE);
2792 /* Allocate RX Ring */
2793 if (bfe_ring_desc_alloc(bfe, &bfe->bfe_rx_ring,
2794 DDI_DMA_READ) != DDI_SUCCESS) {
2795 cmn_err(CE_NOTE, "RX ring allocation failed");
2796 bfe_ring_desc_free(&bfe->bfe_tx_ring);
2797 return (DDI_FAILURE);
2800 bfe->bfe_tx_ring.r_flags = BFE_RING_ALLOCATED;
2801 bfe->bfe_rx_ring.r_flags = BFE_RING_ALLOCATED;
2803 return (DDI_SUCCESS);
2806 static int
2807 bfe_resume(dev_info_t *dip)
2809 bfe_t *bfe;
2810 int err = DDI_SUCCESS;
2812 if ((bfe = ddi_get_driver_private(dip)) == NULL) {
2813 bfe_error(dip, "Unexpected error (no driver private data)"
2814 " while resume");
2815 return (DDI_FAILURE);
2819 * Grab all the locks first.
2821 bfe_grab_locks(bfe);
2822 bfe->bfe_chip_state = BFE_CHIP_RESUME;
2824 bfe_init_vars(bfe);
2825 /* PHY will also start running */
2826 bfe_chip_reset(bfe);
2827 if (bfe_chip_start(bfe) == DDI_FAILURE) {
2828 bfe_error(dip, "Could not resume chip");
2829 err = DDI_FAILURE;
2832 bfe_release_locks(bfe);
2834 if (err == DDI_SUCCESS)
2835 mac_tx_update(bfe->bfe_machdl);
2837 return (err);
2840 static int
2841 bfe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2843 int unit;
2844 bfe_t *bfe;
2845 mac_register_t *macreg;
2846 int ret;
2848 switch (cmd) {
2849 case DDI_RESUME:
2850 return (bfe_resume(dip));
2852 case DDI_ATTACH:
2853 break;
2855 default:
2856 return (DDI_FAILURE);
2860 unit = ddi_get_instance(dip);
2862 bfe = kmem_zalloc(sizeof (bfe_t), KM_SLEEP);
2863 bfe->bfe_dip = dip;
2864 bfe->bfe_unit = unit;
2866 if (pci_config_setup(dip, &bfe->bfe_conf_handle) != DDI_SUCCESS) {
2867 bfe_error(dip, "pci_config_setup failed");
2868 goto fail0;
2872 * Enable IO space, Bus Master and Memory Space accessess.
2874 ret = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_COMM);
2875 pci_config_put16(bfe->bfe_conf_handle, PCI_CONF_COMM,
2876 PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME | ret);
2878 ddi_set_driver_private(dip, bfe);
2880 /* Identify hardware */
2881 if (bfe_identify_hardware(bfe) == BFE_FAILURE) {
2882 bfe_error(dip, "Could not identify device");
2883 goto fail1;
2886 if (bfe_regs_map(bfe) != DDI_SUCCESS) {
2887 bfe_error(dip, "Could not map device registers");
2888 goto fail1;
2891 (void) bfe_get_chip_config(bfe);
2894 * Register with MAC layer
2896 if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
2897 bfe_error(dip, "mac_alloc() failed");
2898 goto fail2;
2901 macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2902 macreg->m_driver = bfe;
2903 macreg->m_dip = dip;
2904 macreg->m_instance = unit;
2905 macreg->m_src_addr = bfe->bfe_ether_addr;
2906 macreg->m_callbacks = &bfe_mac_callbacks;
2907 macreg->m_min_sdu = 0;
2908 macreg->m_max_sdu = ETHERMTU;
2909 macreg->m_margin = VLAN_TAGSZ;
2911 if ((ret = mac_register(macreg, &bfe->bfe_machdl)) != 0) {
2912 bfe_error(dip, "mac_register() failed with %d error", ret);
2913 mac_free(macreg);
2914 goto fail2;
2917 mac_free(macreg);
2919 rw_init(&bfe->bfe_rwlock, NULL, RW_DRIVER,
2920 DDI_INTR_PRI(bfe->bfe_intrpri));
2922 if (bfe_add_intr(bfe) != DDI_SUCCESS) {
2923 bfe_error(dip, "Could not add interrupt");
2924 goto fail3;
2927 if (bfe_rings_alloc(bfe) != DDI_SUCCESS) {
2928 bfe_error(dip, "Could not allocate TX/RX Ring");
2929 goto fail4;
2932 /* Init and then reset the chip */
2933 bfe->bfe_chip_action = 0;
2934 bfe_init_vars(bfe);
2936 /* PHY will also start running */
2937 bfe_chip_reset(bfe);
2940 * Even though we enable the interrupts here but chip's interrupt
2941 * is not enabled yet. It will be enabled once we plumb the interface.
2943 if (ddi_intr_enable(bfe->bfe_intrhdl) != DDI_SUCCESS) {
2944 bfe_error(dip, "Could not enable interrupt");
2945 goto fail4;
2948 return (DDI_SUCCESS);
2950 fail4:
2951 bfe_remove_intr(bfe);
2952 fail3:
2953 (void) mac_unregister(bfe->bfe_machdl);
2954 fail2:
2955 bfe_unmap_regs(bfe);
2956 fail1:
2957 pci_config_teardown(&bfe->bfe_conf_handle);
2958 fail0:
2959 kmem_free(bfe, sizeof (bfe_t));
2960 return (DDI_FAILURE);
2963 static int
2964 bfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
2966 bfe_t *bfe;
2968 bfe = ddi_get_driver_private(devinfo);
2970 switch (cmd) {
2971 case DDI_DETACH:
2973 * We need to stop the timer before grabbing locks otherwise
2974 * we can land-up in deadlock with untimeout.
2976 bfe_stop_timer(bfe);
2979 * First unregister with MAC layer before stopping DMA
2980 * engine.
2982 if (mac_unregister(bfe->bfe_machdl) != DDI_SUCCESS)
2983 return (DDI_FAILURE);
2985 bfe->bfe_machdl = NULL;
2988 * Quiesce the chip first.
2990 bfe_grab_locks(bfe);
2991 bfe_chip_halt(bfe);
2992 bfe_stop_phy(bfe);
2993 bfe_release_locks(bfe);
2995 (void) ddi_intr_disable(bfe->bfe_intrhdl);
2997 /* Make sure timer is gone. */
2998 bfe_stop_timer(bfe);
3001 * Free the DMA resources for buffer and then descriptors
3003 if (bfe->bfe_tx_ring.r_flags == BFE_RING_ALLOCATED) {
3004 /* TX */
3005 bfe_buffer_free(&bfe->bfe_tx_ring);
3006 bfe_ring_desc_free(&bfe->bfe_tx_ring);
3009 if (bfe->bfe_rx_ring.r_flags == BFE_RING_ALLOCATED) {
3010 /* RX */
3011 bfe_buffer_free(&bfe->bfe_rx_ring);
3012 bfe_ring_desc_free(&bfe->bfe_rx_ring);
3015 bfe_remove_intr(bfe);
3016 bfe_unmap_regs(bfe);
3017 pci_config_teardown(&bfe->bfe_conf_handle);
3019 mutex_destroy(&bfe->bfe_tx_ring.r_lock);
3020 mutex_destroy(&bfe->bfe_rx_ring.r_lock);
3021 rw_destroy(&bfe->bfe_rwlock);
3023 kmem_free(bfe, sizeof (bfe_t));
3025 ddi_set_driver_private(devinfo, NULL);
3026 return (DDI_SUCCESS);
3028 case DDI_SUSPEND:
3030 * We need to stop the timer before grabbing locks otherwise
3031 * we can land-up in deadlock with untimeout.
3033 bfe_stop_timer(bfe);
3036 * Grab all the locks first.
3038 bfe_grab_locks(bfe);
3039 bfe_chip_halt(bfe);
3040 bfe_stop_phy(bfe);
3041 bfe->bfe_chip_state = BFE_CHIP_SUSPENDED;
3042 bfe_release_locks(bfe);
3044 return (DDI_SUCCESS);
3046 default:
3047 return (DDI_FAILURE);
3052 * Quiesce the card for fast reboot
3055 bfe_quiesce(dev_info_t *dev_info)
3057 bfe_t *bfe;
3059 bfe = ddi_get_driver_private(dev_info);
3061 bfe_chip_halt(bfe);
3062 bfe_stop_phy(bfe);
3063 bfe->bfe_chip_state = BFE_CHIP_QUIESCED;
3065 return (DDI_SUCCESS);
3068 static struct cb_ops bfe_cb_ops = {
3069 nulldev, /* cb_open */
3070 nulldev, /* cb_close */
3071 nodev, /* cb_strategy */
3072 nodev, /* cb_print */
3073 nodev, /* cb_dump */
3074 nodev, /* cb_read */
3075 nodev, /* cb_write */
3076 nodev, /* cb_ioctl */
3077 nodev, /* cb_devmap */
3078 nodev, /* cb_mmap */
3079 nodev, /* cb_segmap */
3080 nochpoll, /* cb_chpoll */
3081 ddi_prop_op, /* cb_prop_op */
3082 NULL, /* cb_stream */
3083 D_MP | D_HOTPLUG, /* cb_flag */
3084 CB_REV, /* cb_rev */
3085 nodev, /* cb_aread */
3086 nodev /* cb_awrite */
3089 static struct dev_ops bfe_dev_ops = {
3090 DEVO_REV, /* devo_rev */
3091 0, /* devo_refcnt */
3092 NULL, /* devo_getinfo */
3093 nulldev, /* devo_identify */
3094 nulldev, /* devo_probe */
3095 bfe_attach, /* devo_attach */
3096 bfe_detach, /* devo_detach */
3097 nodev, /* devo_reset */
3098 &bfe_cb_ops, /* devo_cb_ops */
3099 NULL, /* devo_bus_ops */
3100 ddi_power, /* devo_power */
3101 bfe_quiesce /* devo_quiesce */
3104 static struct modldrv bfe_modldrv = {
3105 &mod_driverops,
3106 bfe_ident,
3107 &bfe_dev_ops
3110 static struct modlinkage modlinkage = {
3111 MODREV_1, (void *)&bfe_modldrv, NULL
3115 _info(struct modinfo *modinfop)
3117 return (mod_info(&modlinkage, modinfop));
3121 _init(void)
3123 int status;
3125 mac_init_ops(&bfe_dev_ops, MODULE_NAME);
3126 status = mod_install(&modlinkage);
3127 if (status == DDI_FAILURE)
3128 mac_fini_ops(&bfe_dev_ops);
3129 return (status);
3133 _fini(void)
3135 int status;
3137 status = mod_remove(&modlinkage);
3138 if (status == 0) {
3139 mac_fini_ops(&bfe_dev_ops);
3141 return (status);