 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
28 #define REG32(rgep, reg) ((uint32_t *)(rgep->io_regs+(reg)))
29 #define REG16(rgep, reg) ((uint16_t *)(rgep->io_regs+(reg)))
30 #define REG8(rgep, reg) ((uint8_t *)(rgep->io_regs+(reg)))
31 #define PIO_ADDR(rgep, offset) ((void *)(rgep->io_regs+(offset)))
37 * Enables/disables automatic recovery after fault detection
39 static uint32_t rge_autorecover
= 1;
44 #define RGE_DBG RGE_DBG_REGS /* debug flag for this code */
45 static uint32_t rge_watchdog_count
= 1 << 5;
46 static uint32_t rge_rx_watchdog_count
= 1 << 3;
49 * Operating register get/set access routines
52 static uint32_t rge_reg_get32(rge_t
*rgep
, uintptr_t regno
);
53 #pragma inline(rge_reg_get32)
56 rge_reg_get32(rge_t
*rgep
, uintptr_t regno
)
58 RGE_TRACE(("rge_reg_get32($%p, 0x%lx)",
59 (void *)rgep
, regno
));
61 return (ddi_get32(rgep
->io_handle
, REG32(rgep
, regno
)));
64 static void rge_reg_put32(rge_t
*rgep
, uintptr_t regno
, uint32_t data
);
65 #pragma inline(rge_reg_put32)
68 rge_reg_put32(rge_t
*rgep
, uintptr_t regno
, uint32_t data
)
70 RGE_TRACE(("rge_reg_put32($%p, 0x%lx, 0x%x)",
71 (void *)rgep
, regno
, data
));
73 ddi_put32(rgep
->io_handle
, REG32(rgep
, regno
), data
);
76 static void rge_reg_set32(rge_t
*rgep
, uintptr_t regno
, uint32_t bits
);
77 #pragma inline(rge_reg_set32)
80 rge_reg_set32(rge_t
*rgep
, uintptr_t regno
, uint32_t bits
)
84 RGE_TRACE(("rge_reg_set32($%p, 0x%lx, 0x%x)",
85 (void *)rgep
, regno
, bits
));
87 regval
= rge_reg_get32(rgep
, regno
);
89 rge_reg_put32(rgep
, regno
, regval
);
92 static void rge_reg_clr32(rge_t
*rgep
, uintptr_t regno
, uint32_t bits
);
93 #pragma inline(rge_reg_clr32)
96 rge_reg_clr32(rge_t
*rgep
, uintptr_t regno
, uint32_t bits
)
100 RGE_TRACE(("rge_reg_clr32($%p, 0x%lx, 0x%x)",
101 (void *)rgep
, regno
, bits
));
103 regval
= rge_reg_get32(rgep
, regno
);
105 rge_reg_put32(rgep
, regno
, regval
);
108 static uint16_t rge_reg_get16(rge_t
*rgep
, uintptr_t regno
);
109 #pragma inline(rge_reg_get16)
112 rge_reg_get16(rge_t
*rgep
, uintptr_t regno
)
114 RGE_TRACE(("rge_reg_get16($%p, 0x%lx)",
115 (void *)rgep
, regno
));
117 return (ddi_get16(rgep
->io_handle
, REG16(rgep
, regno
)));
120 static void rge_reg_put16(rge_t
*rgep
, uintptr_t regno
, uint16_t data
);
121 #pragma inline(rge_reg_put16)
124 rge_reg_put16(rge_t
*rgep
, uintptr_t regno
, uint16_t data
)
126 RGE_TRACE(("rge_reg_put16($%p, 0x%lx, 0x%x)",
127 (void *)rgep
, regno
, data
));
129 ddi_put16(rgep
->io_handle
, REG16(rgep
, regno
), data
);
132 static uint8_t rge_reg_get8(rge_t
*rgep
, uintptr_t regno
);
133 #pragma inline(rge_reg_get8)
136 rge_reg_get8(rge_t
*rgep
, uintptr_t regno
)
138 RGE_TRACE(("rge_reg_get8($%p, 0x%lx)",
139 (void *)rgep
, regno
));
141 return (ddi_get8(rgep
->io_handle
, REG8(rgep
, regno
)));
144 static void rge_reg_put8(rge_t
*rgep
, uintptr_t regno
, uint8_t data
);
145 #pragma inline(rge_reg_put8)
148 rge_reg_put8(rge_t
*rgep
, uintptr_t regno
, uint8_t data
)
150 RGE_TRACE(("rge_reg_put8($%p, 0x%lx, 0x%x)",
151 (void *)rgep
, regno
, data
));
153 ddi_put8(rgep
->io_handle
, REG8(rgep
, regno
), data
);
156 static void rge_reg_set8(rge_t
*rgep
, uintptr_t regno
, uint8_t bits
);
157 #pragma inline(rge_reg_set8)
160 rge_reg_set8(rge_t
*rgep
, uintptr_t regno
, uint8_t bits
)
164 RGE_TRACE(("rge_reg_set8($%p, 0x%lx, 0x%x)",
165 (void *)rgep
, regno
, bits
));
167 regval
= rge_reg_get8(rgep
, regno
);
169 rge_reg_put8(rgep
, regno
, regval
);
172 static void rge_reg_clr8(rge_t
*rgep
, uintptr_t regno
, uint8_t bits
);
173 #pragma inline(rge_reg_clr8)
176 rge_reg_clr8(rge_t
*rgep
, uintptr_t regno
, uint8_t bits
)
180 RGE_TRACE(("rge_reg_clr8($%p, 0x%lx, 0x%x)",
181 (void *)rgep
, regno
, bits
));
183 regval
= rge_reg_get8(rgep
, regno
);
185 rge_reg_put8(rgep
, regno
, regval
);
188 uint16_t rge_mii_get16(rge_t
*rgep
, uintptr_t mii
);
189 #pragma no_inline(rge_mii_get16)
192 rge_mii_get16(rge_t
*rgep
, uintptr_t mii
)
198 regval
= (mii
& PHY_REG_MASK
) << PHY_REG_SHIFT
;
199 rge_reg_put32(rgep
, PHY_ACCESS_REG
, regval
);
202 * Waiting for PHY reading OK
204 for (i
= 0; i
< PHY_RESET_LOOP
; i
++) {
206 val32
= rge_reg_get32(rgep
, PHY_ACCESS_REG
);
207 if (val32
& PHY_ACCESS_WR_FLAG
)
208 return ((uint16_t)(val32
& 0xffff));
211 RGE_REPORT((rgep
, "rge_mii_get16(0x%x) fail, val = %x", mii
, val32
));
212 return ((uint16_t)~0u);
215 void rge_mii_put16(rge_t
*rgep
, uintptr_t mii
, uint16_t data
);
216 #pragma no_inline(rge_mii_put16)
219 rge_mii_put16(rge_t
*rgep
, uintptr_t mii
, uint16_t data
)
225 regval
= (mii
& PHY_REG_MASK
) << PHY_REG_SHIFT
;
226 regval
|= data
& PHY_DATA_MASK
;
227 regval
|= PHY_ACCESS_WR_FLAG
;
228 rge_reg_put32(rgep
, PHY_ACCESS_REG
, regval
);
231 * Waiting for PHY writing OK
233 for (i
= 0; i
< PHY_RESET_LOOP
; i
++) {
235 val32
= rge_reg_get32(rgep
, PHY_ACCESS_REG
);
236 if (!(val32
& PHY_ACCESS_WR_FLAG
))
239 RGE_REPORT((rgep
, "rge_mii_put16(0x%lx, 0x%x) fail",
243 void rge_ephy_put16(rge_t
*rgep
, uintptr_t emii
, uint16_t data
);
244 #pragma no_inline(rge_ephy_put16)
247 rge_ephy_put16(rge_t
*rgep
, uintptr_t emii
, uint16_t data
)
253 regval
= (emii
& EPHY_REG_MASK
) << EPHY_REG_SHIFT
;
254 regval
|= data
& EPHY_DATA_MASK
;
255 regval
|= EPHY_ACCESS_WR_FLAG
;
256 rge_reg_put32(rgep
, EPHY_ACCESS_REG
, regval
);
259 * Waiting for PHY writing OK
261 for (i
= 0; i
< PHY_RESET_LOOP
; i
++) {
263 val32
= rge_reg_get32(rgep
, EPHY_ACCESS_REG
);
264 if (!(val32
& EPHY_ACCESS_WR_FLAG
))
267 RGE_REPORT((rgep
, "rge_ephy_put16(0x%lx, 0x%x) fail",
272 * Atomically shift a 32-bit word left, returning
273 * the value it had *before* the shift was applied
275 static uint32_t rge_atomic_shl32(uint32_t *sp
, uint_t count
);
276 #pragma inline(rge_mii_put16)
279 rge_atomic_shl32(uint32_t *sp
, uint_t count
)
287 newval
= oldval
<< count
;
288 } while (atomic_cas_32(sp
, oldval
, newval
) != oldval
);
294 * PHY operation routines
299 rge_phydump(rge_t
*rgep
)
304 ASSERT(mutex_owned(rgep
->genlock
));
306 for (i
= 0; i
< 32; ++i
) {
307 regs
[i
] = rge_mii_get16(rgep
, i
);
310 for (i
= 0; i
< 32; i
+= 8)
311 RGE_DEBUG(("rge_phydump: "
312 "0x%04x %04x %04x %04x %04x %04x %04x %04x",
313 regs
[i
+0], regs
[i
+1], regs
[i
+2], regs
[i
+3],
314 regs
[i
+4], regs
[i
+5], regs
[i
+6], regs
[i
+7]));
317 #endif /* RGE_DEBUGGING */
320 rge_phy_check(rge_t
*rgep
)
324 if (rgep
->param_link_up
== LINK_STATE_DOWN
) {
326 * RTL8169S/8110S PHY has the "PCS bug". Need reset PHY
327 * every 15 seconds whin link down & advertise is 1000.
329 if (rgep
->chipid
.phy_ver
== PHY_VER_S
) {
330 gig_ctl
= rge_mii_get16(rgep
, MII_1000BASE_T_CONTROL
);
331 if (gig_ctl
& MII_1000BT_CTL_ADV_FDX
) {
332 rgep
->link_down_count
++;
333 if (rgep
->link_down_count
> 15) {
334 (void) rge_phy_reset(rgep
);
335 rgep
->stats
.phy_reset
++;
336 rgep
->link_down_count
= 0;
341 rgep
->link_down_count
= 0;
346 * Basic low-level function to reset the PHY.
347 * Doesn't incorporate any special-case workarounds.
349 * Returns TRUE on success, FALSE if the RESET bit doesn't clear
352 rge_phy_reset(rge_t
*rgep
)
358 * Set the PHY RESET bit, then wait up to 5 ms for it to self-clear
360 control
= rge_mii_get16(rgep
, MII_CONTROL
);
361 rge_mii_put16(rgep
, MII_CONTROL
, control
| MII_CONTROL_RESET
);
362 for (count
= 0; count
< 5; count
++) {
364 control
= rge_mii_get16(rgep
, MII_CONTROL
);
365 if (BIC(control
, MII_CONTROL_RESET
))
369 RGE_REPORT((rgep
, "rge_phy_reset: FAILED, control now 0x%x", control
));
374 * Synchronise the PHY's speed/duplex/autonegotiation capabilities
375 * and advertisements with the required settings as specified by the various
376 * param_* variables that can be poked via the NDD interface.
378 * We always reset the PHY and reprogram *all* the relevant registers,
379 * not just those changed. This should cause the link to go down, and then
380 * back up again once the link is stable and autonegotiation (if enabled)
381 * is complete. We should get a link state change interrupt somewhere along
384 * NOTE: <genlock> must already be held by the caller
387 rge_phy_update(rge_t
*rgep
)
389 boolean_t adv_autoneg
;
391 boolean_t adv_asym_pause
;
392 boolean_t adv_1000fdx
;
393 boolean_t adv_1000hdx
;
394 boolean_t adv_100fdx
;
395 boolean_t adv_100hdx
;
403 ASSERT(mutex_owned(rgep
->genlock
));
405 RGE_DEBUG(("rge_phy_update: autoneg %d "
406 "pause %d asym_pause %d "
407 "1000fdx %d 1000hdx %d "
408 "100fdx %d 100hdx %d "
409 "10fdx %d 10hdx %d ",
410 rgep
->param_adv_autoneg
,
411 rgep
->param_adv_pause
, rgep
->param_adv_asym_pause
,
412 rgep
->param_adv_1000fdx
, rgep
->param_adv_1000hdx
,
413 rgep
->param_adv_100fdx
, rgep
->param_adv_100hdx
,
414 rgep
->param_adv_10fdx
, rgep
->param_adv_10hdx
));
416 control
= gigctrl
= anar
= 0;
419 * PHY settings are normally based on the param_* variables,
420 * but if any loopback mode is in effect, that takes precedence.
422 * RGE supports MAC-internal loopback, PHY-internal loopback,
423 * and External loopback at a variety of speeds (with a special
424 * cable). In all cases, autoneg is turned OFF, full-duplex
425 * is turned ON, and the speed/mastership is forced.
427 switch (rgep
->param_loop_mode
) {
430 adv_autoneg
= rgep
->param_adv_autoneg
;
431 adv_pause
= rgep
->param_adv_pause
;
432 adv_asym_pause
= rgep
->param_adv_asym_pause
;
433 adv_1000fdx
= rgep
->param_adv_1000fdx
;
434 adv_1000hdx
= rgep
->param_adv_1000hdx
;
435 adv_100fdx
= rgep
->param_adv_100fdx
;
436 adv_100hdx
= rgep
->param_adv_100hdx
;
437 adv_10fdx
= rgep
->param_adv_10fdx
;
438 adv_10hdx
= rgep
->param_adv_10hdx
;
441 case RGE_LOOP_INTERNAL_PHY
:
442 case RGE_LOOP_INTERNAL_MAC
:
443 adv_autoneg
= adv_pause
= adv_asym_pause
= B_FALSE
;
444 adv_1000fdx
= adv_100fdx
= adv_10fdx
= B_FALSE
;
445 adv_1000hdx
= adv_100hdx
= adv_10hdx
= B_FALSE
;
446 rgep
->param_link_duplex
= LINK_DUPLEX_FULL
;
448 switch (rgep
->param_loop_mode
) {
449 case RGE_LOOP_INTERNAL_PHY
:
450 if (rgep
->chipid
.mac_ver
!= MAC_VER_8101E
) {
451 rgep
->param_link_speed
= 1000;
452 adv_1000fdx
= B_TRUE
;
454 rgep
->param_link_speed
= 100;
457 control
= MII_CONTROL_LOOPBACK
;
460 case RGE_LOOP_INTERNAL_MAC
:
461 if (rgep
->chipid
.mac_ver
!= MAC_VER_8101E
) {
462 rgep
->param_link_speed
= 1000;
463 adv_1000fdx
= B_TRUE
;
465 rgep
->param_link_speed
= 100;
471 RGE_DEBUG(("rge_phy_update: autoneg %d "
472 "pause %d asym_pause %d "
473 "1000fdx %d 1000hdx %d "
474 "100fdx %d 100hdx %d "
475 "10fdx %d 10hdx %d ",
477 adv_pause
, adv_asym_pause
,
478 adv_1000fdx
, adv_1000hdx
,
479 adv_100fdx
, adv_100hdx
,
480 adv_10fdx
, adv_10hdx
));
483 * We should have at least one technology capability set;
484 * if not, we select a default of 1000Mb/s full-duplex
486 if (!adv_1000fdx
&& !adv_100fdx
&& !adv_10fdx
&&
487 !adv_1000hdx
&& !adv_100hdx
&& !adv_10hdx
) {
488 if (rgep
->chipid
.mac_ver
!= MAC_VER_8101E
)
489 adv_1000fdx
= B_TRUE
;
491 adv_1000fdx
= B_FALSE
;
497 * Now transform the adv_* variables into the proper settings
498 * of the PHY registers ...
500 * If autonegotiation is (now) enabled, we want to trigger
501 * a new autonegotiation cycle once the PHY has been
502 * programmed with the capabilities to be advertised.
504 * RTL8169/8110 doesn't support 1000Mb/s half-duplex.
507 control
|= MII_CONTROL_ANE
|MII_CONTROL_RSAN
;
510 control
|= MII_CONTROL_1GB
|MII_CONTROL_FDUPLEX
;
511 else if (adv_1000hdx
)
512 control
|= MII_CONTROL_1GB
;
514 control
|= MII_CONTROL_100MB
|MII_CONTROL_FDUPLEX
;
516 control
|= MII_CONTROL_100MB
;
518 control
|= MII_CONTROL_FDUPLEX
;
522 { _NOTE(EMPTY
); } /* Can't get here anyway ... */
525 gigctrl
|= MII_1000BT_CTL_ADV_FDX
;
527 * Chipset limitation: need set other capabilities to true
529 if (rgep
->chipid
.is_pcie
)
530 adv_1000hdx
= B_TRUE
;
538 gigctrl
|= MII_1000BT_CTL_ADV_HDX
;
541 anar
|= MII_ABILITY_100BASE_TX_FD
;
543 anar
|= MII_ABILITY_100BASE_TX
;
545 anar
|= MII_ABILITY_10BASE_T_FD
;
547 anar
|= MII_ABILITY_10BASE_T
;
550 anar
|= MII_ABILITY_PAUSE
;
552 anar
|= MII_ABILITY_ASMPAUSE
;
555 * Munge in any other fixed bits we require ...
557 anar
|= MII_AN_SELECTOR_8023
;
560 * Restart the PHY and write the new values. Note the
561 * time, so that we can say whether subsequent link state
562 * changes can be attributed to our reprogramming the PHY
565 if (rgep
->chipid
.mac_ver
== MAC_VER_8168B_B
||
566 rgep
->chipid
.mac_ver
== MAC_VER_8168B_C
) {
567 /* power up PHY for RTL8168B chipset */
568 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0000);
569 rge_mii_put16(rgep
, PHY_0E_REG
, 0x0000);
570 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0000);
572 rge_mii_put16(rgep
, MII_AN_ADVERT
, anar
);
573 rge_mii_put16(rgep
, MII_1000BASE_T_CONTROL
, gigctrl
);
574 rge_mii_put16(rgep
, MII_CONTROL
, control
);
576 RGE_DEBUG(("rge_phy_update: anar <- 0x%x", anar
));
577 RGE_DEBUG(("rge_phy_update: control <- 0x%x", control
));
578 RGE_DEBUG(("rge_phy_update: gigctrl <- 0x%x", gigctrl
));
581 void rge_phy_init(rge_t
*rgep
);
582 #pragma no_inline(rge_phy_init)
585 rge_phy_init(rge_t
*rgep
)
587 rgep
->phy_mii_addr
= 1;
590 * Below phy config steps are copied from the Programming Guide
591 * (there's no detail comments for these steps.)
593 switch (rgep
->chipid
.mac_ver
) {
594 case MAC_VER_8169S_D
:
595 case MAC_VER_8169S_E
:
596 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0001);
597 rge_mii_put16(rgep
, PHY_15_REG
, 0x1000);
598 rge_mii_put16(rgep
, PHY_18_REG
, 0x65c7);
599 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0x0000);
600 rge_mii_put16(rgep
, PHY_ID_REG_2
, 0x00a1);
601 rge_mii_put16(rgep
, PHY_ID_REG_1
, 0x0008);
602 rge_mii_put16(rgep
, PHY_BMSR_REG
, 0x1020);
603 rge_mii_put16(rgep
, PHY_BMCR_REG
, 0x1000);
604 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0x0800);
605 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0x0000);
606 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0x7000);
607 rge_mii_put16(rgep
, PHY_ID_REG_2
, 0xff41);
608 rge_mii_put16(rgep
, PHY_ID_REG_1
, 0xde60);
609 rge_mii_put16(rgep
, PHY_BMSR_REG
, 0x0140);
610 rge_mii_put16(rgep
, PHY_BMCR_REG
, 0x0077);
611 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0x7800);
612 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0x7000);
613 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0xa000);
614 rge_mii_put16(rgep
, PHY_ID_REG_2
, 0xdf01);
615 rge_mii_put16(rgep
, PHY_ID_REG_1
, 0xdf20);
616 rge_mii_put16(rgep
, PHY_BMSR_REG
, 0xff95);
617 rge_mii_put16(rgep
, PHY_BMCR_REG
, 0xfa00);
618 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0xa800);
619 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0xa000);
620 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0xb000);
621 rge_mii_put16(rgep
, PHY_ID_REG_2
, 0xff41);
622 rge_mii_put16(rgep
, PHY_ID_REG_1
, 0xde20);
623 rge_mii_put16(rgep
, PHY_BMSR_REG
, 0x0140);
624 rge_mii_put16(rgep
, PHY_BMCR_REG
, 0x00bb);
625 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0xb800);
626 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0xb000);
627 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0xf000);
628 rge_mii_put16(rgep
, PHY_ID_REG_2
, 0xdf01);
629 rge_mii_put16(rgep
, PHY_ID_REG_1
, 0xdf20);
630 rge_mii_put16(rgep
, PHY_BMSR_REG
, 0xff95);
631 rge_mii_put16(rgep
, PHY_BMCR_REG
, 0xbf00);
632 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0xf800);
633 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0xf000);
634 rge_mii_put16(rgep
, PHY_ANAR_REG
, 0x0000);
635 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0000);
636 rge_mii_put16(rgep
, PHY_0B_REG
, 0x0000);
640 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0001);
641 rge_mii_put16(rgep
, PHY_1B_REG
, 0xD41E);
642 rge_mii_put16(rgep
, PHY_0E_REG
, 0x7bff);
643 rge_mii_put16(rgep
, PHY_GBCR_REG
, GBCR_DEFAULT
);
644 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0002);
645 rge_mii_put16(rgep
, PHY_BMSR_REG
, 0x90D0);
646 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0000);
650 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0001);
651 rge_mii_put16(rgep
, PHY_ANER_REG
, 0x0078);
652 rge_mii_put16(rgep
, PHY_ANNPRR_REG
, 0x05dc);
653 rge_mii_put16(rgep
, PHY_GBCR_REG
, 0x2672);
654 rge_mii_put16(rgep
, PHY_GBSR_REG
, 0x6a14);
655 rge_mii_put16(rgep
, PHY_0B_REG
, 0x7cb0);
656 rge_mii_put16(rgep
, PHY_0C_REG
, 0xdb80);
657 rge_mii_put16(rgep
, PHY_1B_REG
, 0xc414);
658 rge_mii_put16(rgep
, PHY_1C_REG
, 0xef03);
659 rge_mii_put16(rgep
, PHY_1D_REG
, 0x3dc8);
660 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0003);
661 rge_mii_put16(rgep
, PHY_13_REG
, 0x0600);
662 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0000);
666 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0001);
667 rge_mii_put16(rgep
, PHY_ANER_REG
, 0x00aa);
668 rge_mii_put16(rgep
, PHY_ANNPTR_REG
, 0x3173);
669 rge_mii_put16(rgep
, PHY_ANNPRR_REG
, 0x08fc);
670 rge_mii_put16(rgep
, PHY_GBCR_REG
, 0xe2d0);
671 rge_mii_put16(rgep
, PHY_0B_REG
, 0x941a);
672 rge_mii_put16(rgep
, PHY_18_REG
, 0x65fe);
673 rge_mii_put16(rgep
, PHY_1C_REG
, 0x1e02);
674 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0002);
675 rge_mii_put16(rgep
, PHY_ANNPTR_REG
, 0x103e);
676 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0000);
679 case MAC_VER_8168B_B
:
680 case MAC_VER_8168B_C
:
681 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0001);
682 rge_mii_put16(rgep
, PHY_0B_REG
, 0x94b0);
683 rge_mii_put16(rgep
, PHY_1B_REG
, 0xc416);
684 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0003);
685 rge_mii_put16(rgep
, PHY_12_REG
, 0x6096);
686 rge_mii_put16(rgep
, PHY_1F_REG
, 0x0000);
691 void rge_chip_ident(rge_t
*rgep
);
692 #pragma no_inline(rge_chip_ident)
695 rge_chip_ident(rge_t
*rgep
)
697 chip_id_t
*chip
= &rgep
->chipid
;
702 * Read and record MAC version
704 val32
= rge_reg_get32(rgep
, TX_CONFIG_REG
);
705 val32
&= HW_VERSION_ID_0
| HW_VERSION_ID_1
;
706 chip
->mac_ver
= val32
;
707 chip
->is_pcie
= pci_lcap_locate(rgep
->cfg_handle
,
708 PCI_CAP_ID_PCI_E
, &val16
) == DDI_SUCCESS
;
711 * Workaround for 8101E_C
713 chip
->enable_mac_first
= !chip
->is_pcie
;
714 if (chip
->mac_ver
== MAC_VER_8101E_C
) {
715 chip
->is_pcie
= B_FALSE
;
719 * Read and record PHY version
721 val16
= rge_mii_get16(rgep
, PHY_ID_REG_2
);
722 val16
&= PHY_VER_MASK
;
723 chip
->phy_ver
= val16
;
725 /* set pci latency timer */
726 if (chip
->mac_ver
== MAC_VER_8169
||
727 chip
->mac_ver
== MAC_VER_8169S_D
||
728 chip
->mac_ver
== MAC_VER_8169S_E
||
729 chip
->mac_ver
== MAC_VER_8169SC
)
730 pci_config_put8(rgep
->cfg_handle
, PCI_CONF_LATENCY_TIMER
, 0x40);
732 if (chip
->mac_ver
== MAC_VER_8169SC
) {
733 val16
= rge_reg_get16(rgep
, RT_CONFIG_1_REG
);
735 if (val16
== 0x1) /* 66Mhz PCI */
736 rge_reg_put32(rgep
, 0x7c, 0x000700ff);
737 else if (val16
== 0x0) /* 33Mhz PCI */
738 rge_reg_put32(rgep
, 0x7c, 0x0007ff00);
742 * PCIE chipset require the Rx buffer start address must be
743 * 8-byte alignment and the Rx buffer size must be multiple of 8.
744 * We'll just use bcopy in receive procedure for the PCIE chipset.
747 rgep
->chip_flags
|= CHIP_FLAG_FORCE_BCOPY
;
748 if (rgep
->default_mtu
> ETHERMTU
) {
749 rge_notice(rgep
, "Jumbo packets not supported "
750 "for this PCIE chipset");
751 rgep
->default_mtu
= ETHERMTU
;
754 if (rgep
->chip_flags
& CHIP_FLAG_FORCE_BCOPY
)
757 rgep
->head_room
= RGE_HEADROOM
;
760 * Initialize other variables.
762 if (rgep
->default_mtu
< ETHERMTU
|| rgep
->default_mtu
> RGE_JUMBO_MTU
)
763 rgep
->default_mtu
= ETHERMTU
;
764 if (rgep
->default_mtu
> ETHERMTU
) {
765 rgep
->rxbuf_size
= RGE_BUFF_SIZE_JUMBO
;
766 rgep
->txbuf_size
= RGE_BUFF_SIZE_JUMBO
;
767 rgep
->ethmax_size
= RGE_JUMBO_SIZE
;
769 rgep
->rxbuf_size
= RGE_BUFF_SIZE_STD
;
770 rgep
->txbuf_size
= RGE_BUFF_SIZE_STD
;
771 rgep
->ethmax_size
= ETHERMAX
;
773 chip
->rxconfig
= RX_CONFIG_DEFAULT
;
774 chip
->txconfig
= TX_CONFIG_DEFAULT
;
776 /* interval to update statistics for polling mode */
777 rgep
->tick_delta
= drv_usectohz(1000*1000/CLK_TICK
);
779 /* ensure we are not in polling mode */
780 rgep
->curr_tick
= ddi_get_lbolt() - 2*rgep
->tick_delta
;
781 RGE_TRACE(("%s: MAC version = %x, PHY version = %x",
782 rgep
->ifname
, chip
->mac_ver
, chip
->phy_ver
));
786 * Perform first-stage chip (re-)initialisation, using only config-space
789 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
790 * returning the data in the structure pointed to by <idp>.
791 * + Enable Memory Space accesses.
792 * + Enable Bus Mastering according.
794 void rge_chip_cfg_init(rge_t
*rgep
, chip_id_t
*cidp
);
795 #pragma no_inline(rge_chip_cfg_init)
798 rge_chip_cfg_init(rge_t
*rgep
, chip_id_t
*cidp
)
800 ddi_acc_handle_t handle
;
803 handle
= rgep
->cfg_handle
;
806 * Save PCI cache line size and subsystem vendor ID
808 cidp
->command
= pci_config_get16(handle
, PCI_CONF_COMM
);
809 cidp
->vendor
= pci_config_get16(handle
, PCI_CONF_VENID
);
810 cidp
->device
= pci_config_get16(handle
, PCI_CONF_DEVID
);
811 cidp
->subven
= pci_config_get16(handle
, PCI_CONF_SUBVENID
);
812 cidp
->subdev
= pci_config_get16(handle
, PCI_CONF_SUBSYSID
);
813 cidp
->revision
= pci_config_get8(handle
, PCI_CONF_REVID
);
814 cidp
->clsize
= pci_config_get8(handle
, PCI_CONF_CACHE_LINESZ
);
815 cidp
->latency
= pci_config_get8(handle
, PCI_CONF_LATENCY_TIMER
);
818 * Turn on Master Enable (DMA) and IO Enable bits.
819 * Enable PCI Memory Space accesses
821 commd
= cidp
->command
;
822 commd
|= PCI_COMM_ME
| PCI_COMM_MAE
| PCI_COMM_IO
;
823 pci_config_put16(handle
, PCI_CONF_COMM
, commd
);
825 RGE_DEBUG(("rge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
826 cidp
->vendor
, cidp
->device
, cidp
->revision
));
827 RGE_DEBUG(("rge_chip_cfg_init: subven 0x%x subdev 0x%x",
828 cidp
->subven
, cidp
->subdev
));
829 RGE_DEBUG(("rge_chip_cfg_init: clsize %d latency %d command 0x%x",
830 cidp
->clsize
, cidp
->latency
, cidp
->command
));
833 int rge_chip_reset(rge_t
*rgep
);
834 #pragma no_inline(rge_chip_reset)
837 rge_chip_reset(rge_t
*rgep
)
843 * Chip should be in STOP state
845 rge_reg_clr8(rgep
, RT_COMMAND_REG
,
846 RT_COMMAND_RX_ENABLE
| RT_COMMAND_TX_ENABLE
);
851 rgep
->int_mask
= INT_MASK_NONE
;
852 rge_reg_put16(rgep
, INT_MASK_REG
, rgep
->int_mask
);
855 * Clear pended interrupt
857 rge_reg_put16(rgep
, INT_STATUS_REG
, INT_MASK_ALL
);
862 rge_reg_set8(rgep
, RT_COMMAND_REG
, RT_COMMAND_RESET
);
865 * Wait for reset success
867 for (i
= 0; i
< CHIP_RESET_LOOP
; i
++) {
869 val8
= rge_reg_get8(rgep
, RT_COMMAND_REG
);
870 if (!(val8
& RT_COMMAND_RESET
)) {
871 rgep
->rge_chip_state
= RGE_CHIP_RESET
;
875 RGE_REPORT((rgep
, "rge_chip_reset fail."));
879 void rge_chip_init(rge_t
*rgep
);
880 #pragma no_inline(rge_chip_init)
883 rge_chip_init(rge_t
*rgep
)
888 chip_id_t
*chip
= &rgep
->chipid
;
891 * Increase the threshold voltage of RX sensitivity
893 if (chip
->mac_ver
== MAC_VER_8168B_B
||
894 chip
->mac_ver
== MAC_VER_8168B_C
||
895 chip
->mac_ver
== MAC_VER_8101E
) {
896 rge_ephy_put16(rgep
, 0x01, 0x1bd3);
899 if (chip
->mac_ver
== MAC_VER_8168
||
900 chip
->mac_ver
== MAC_VER_8168B_B
) {
901 val16
= rge_reg_get8(rgep
, PHY_STATUS_REG
);
902 val16
= 0x12<<8 | val16
;
903 rge_reg_put16(rgep
, PHY_STATUS_REG
, val16
);
904 rge_reg_put32(rgep
, RT_CSI_DATA_REG
, 0x00021c01);
905 rge_reg_put32(rgep
, RT_CSI_ACCESS_REG
, 0x8000f088);
906 rge_reg_put32(rgep
, RT_CSI_DATA_REG
, 0x00004000);
907 rge_reg_put32(rgep
, RT_CSI_ACCESS_REG
, 0x8000f0b0);
908 rge_reg_put32(rgep
, RT_CSI_ACCESS_REG
, 0x0000f068);
909 val32
= rge_reg_get32(rgep
, RT_CSI_DATA_REG
);
912 rge_reg_put32(rgep
, RT_CSI_DATA_REG
, val32
);
913 rge_reg_put32(rgep
, RT_CSI_ACCESS_REG
, 0x8000f068);
917 * Config MII register
919 rgep
->param_link_up
= LINK_STATE_DOWN
;
920 rge_phy_update(rgep
);
923 * Enable Rx checksum offload.
924 * Then for vlan support, we must enable receive vlan de-tagging.
925 * Otherwise, there'll be checksum error.
927 val16
= rge_reg_get16(rgep
, CPLUS_COMMAND_REG
);
928 val16
|= RX_CKSM_OFFLOAD
| RX_VLAN_DETAG
;
929 if (chip
->mac_ver
== MAC_VER_8169S_D
) {
930 val16
|= CPLUS_BIT14
| MUL_PCI_RW_ENABLE
;
931 rge_reg_put8(rgep
, RESV_82_REG
, 0x01);
933 if (chip
->mac_ver
== MAC_VER_8169S_E
||
934 chip
->mac_ver
== MAC_VER_8169SC
) {
935 val16
|= MUL_PCI_RW_ENABLE
;
937 rge_reg_put16(rgep
, CPLUS_COMMAND_REG
, val16
& (~0x03));
940 * Start transmit/receive before set tx/rx configuration register
942 if (chip
->enable_mac_first
)
943 rge_reg_set8(rgep
, RT_COMMAND_REG
,
944 RT_COMMAND_RX_ENABLE
| RT_COMMAND_TX_ENABLE
);
947 * Set dump tally counter register
949 val32
= rgep
->dma_area_stats
.cookie
.dmac_laddress
>> 32;
950 rge_reg_put32(rgep
, DUMP_COUNTER_REG_1
, val32
);
951 val32
= rge_reg_get32(rgep
, DUMP_COUNTER_REG_0
);
952 val32
&= DUMP_COUNTER_REG_RESV
;
953 val32
|= rgep
->dma_area_stats
.cookie
.dmac_laddress
;
954 rge_reg_put32(rgep
, DUMP_COUNTER_REG_0
, val32
);
957 * Change to config register write enable mode
959 rge_reg_set8(rgep
, RT_93c46_COMMOND_REG
, RT_93c46_MODE_CONFIG
);
962 * Set Tx/Rx maximum packet size
964 if (rgep
->default_mtu
> ETHERMTU
) {
965 rge_reg_put8(rgep
, TX_MAX_PKTSIZE_REG
, TX_PKTSIZE_JUMBO
);
966 rge_reg_put16(rgep
, RX_MAX_PKTSIZE_REG
, RX_PKTSIZE_JUMBO
);
967 } else if (rgep
->chipid
.mac_ver
!= MAC_VER_8101E
) {
968 rge_reg_put8(rgep
, TX_MAX_PKTSIZE_REG
, TX_PKTSIZE_STD
);
969 rge_reg_put16(rgep
, RX_MAX_PKTSIZE_REG
, RX_PKTSIZE_STD
);
971 rge_reg_put8(rgep
, TX_MAX_PKTSIZE_REG
, TX_PKTSIZE_STD_8101E
);
972 rge_reg_put16(rgep
, RX_MAX_PKTSIZE_REG
, RX_PKTSIZE_STD_8101E
);
976 * Set receive configuration register
978 val32
= rge_reg_get32(rgep
, RX_CONFIG_REG
);
979 val32
&= RX_CONFIG_REG_RESV
;
981 val32
|= RX_ACCEPT_ALL_PKT
;
982 rge_reg_put32(rgep
, RX_CONFIG_REG
, val32
| chip
->rxconfig
);
985 * Set transmit configuration register
987 val32
= rge_reg_get32(rgep
, TX_CONFIG_REG
);
988 val32
&= TX_CONFIG_REG_RESV
;
989 rge_reg_put32(rgep
, TX_CONFIG_REG
, val32
| chip
->txconfig
);
992 * Set Tx/Rx descriptor register
994 val32
= rgep
->tx_desc
.cookie
.dmac_laddress
;
995 rge_reg_put32(rgep
, NORMAL_TX_RING_ADDR_LO_REG
, val32
);
996 val32
= rgep
->tx_desc
.cookie
.dmac_laddress
>> 32;
997 rge_reg_put32(rgep
, NORMAL_TX_RING_ADDR_HI_REG
, val32
);
998 rge_reg_put32(rgep
, HIGH_TX_RING_ADDR_LO_REG
, 0);
999 rge_reg_put32(rgep
, HIGH_TX_RING_ADDR_HI_REG
, 0);
1000 val32
= rgep
->rx_desc
.cookie
.dmac_laddress
;
1001 rge_reg_put32(rgep
, RX_RING_ADDR_LO_REG
, val32
);
1002 val32
= rgep
->rx_desc
.cookie
.dmac_laddress
>> 32;
1003 rge_reg_put32(rgep
, RX_RING_ADDR_HI_REG
, val32
);
1006 * Suggested setting from Realtek
1008 if (rgep
->chipid
.mac_ver
!= MAC_VER_8101E
)
1009 rge_reg_put16(rgep
, RESV_E2_REG
, 0x282a);
1011 rge_reg_put16(rgep
, RESV_E2_REG
, 0x0000);
1014 * Set multicast register
1016 hashp
= (uint32_t *)rgep
->mcast_hash
;
1017 if (rgep
->promisc
) {
1018 rge_reg_put32(rgep
, MULTICAST_0_REG
, ~0U);
1019 rge_reg_put32(rgep
, MULTICAST_4_REG
, ~0U);
1021 rge_reg_put32(rgep
, MULTICAST_0_REG
, RGE_BSWAP_32(hashp
[0]));
1022 rge_reg_put32(rgep
, MULTICAST_4_REG
, RGE_BSWAP_32(hashp
[1]));
1026 * Msic register setting:
1027 * -- Missed packet counter: clear it
1028 * -- TimerInt Register
1029 * -- Timer count register
1031 rge_reg_put32(rgep
, RX_PKT_MISS_COUNT_REG
, 0);
1032 rge_reg_put32(rgep
, TIMER_INT_REG
, TIMER_INT_NONE
);
1033 rge_reg_put32(rgep
, TIMER_COUNT_REG
, 0);
1036 * disable the Unicast Wakeup Frame capability
1038 rge_reg_clr8(rgep
, RT_CONFIG_5_REG
, RT_UNI_WAKE_FRAME
);
1041 * Return to normal network/host communication mode
1043 rge_reg_clr8(rgep
, RT_93c46_COMMOND_REG
, RT_93c46_MODE_CONFIG
);
1048 * rge_chip_start() -- start the chip transmitting and/or receiving,
1049 * including enabling interrupts
1051 void rge_chip_start(rge_t
*rgep
);
1052 #pragma no_inline(rge_chip_start)
1055 rge_chip_start(rge_t
*rgep
)
1060 bzero(&rgep
->stats
, sizeof (rge_stats_t
));
1061 DMA_ZERO(rgep
->dma_area_stats
);
1064 * Start transmit/receive
1066 rge_reg_set8(rgep
, RT_COMMAND_REG
,
1067 RT_COMMAND_RX_ENABLE
| RT_COMMAND_TX_ENABLE
);
1072 rgep
->int_mask
= RGE_INT_MASK
;
1073 if (rgep
->chipid
.is_pcie
) {
1074 rgep
->int_mask
|= NO_TXDESC_INT
;
1076 rgep
->rx_fifo_ovf
= 0;
1077 rgep
->int_mask
|= RX_FIFO_OVERFLOW_INT
;
1078 rge_reg_put16(rgep
, INT_MASK_REG
, rgep
->int_mask
);
1083 rgep
->rge_chip_state
= RGE_CHIP_RUNNING
;
1087 * rge_chip_stop() -- stop board receiving
1089 * Since this function is also invoked by rge_quiesce(), it
1090 * must not block; also, no tracing or logging takes place
1091 * when invoked by rge_quiesce().
1093 void rge_chip_stop(rge_t
*rgep
, boolean_t fault
);
1094 #pragma no_inline(rge_chip_stop)
1097 rge_chip_stop(rge_t
*rgep
, boolean_t fault
)
1102 rgep
->int_mask
= INT_MASK_NONE
;
1103 rge_reg_put16(rgep
, INT_MASK_REG
, rgep
->int_mask
);
1106 * Clear pended interrupt
1108 if (!rgep
->suspended
) {
1109 rge_reg_put16(rgep
, INT_STATUS_REG
, INT_MASK_ALL
);
1113 * Stop the board and disable transmit/receive
1115 rge_reg_clr8(rgep
, RT_COMMAND_REG
,
1116 RT_COMMAND_RX_ENABLE
| RT_COMMAND_TX_ENABLE
);
1119 rgep
->rge_chip_state
= RGE_CHIP_FAULT
;
1121 rgep
->rge_chip_state
= RGE_CHIP_STOPPED
;
1125 * rge_get_mac_addr() -- get the MAC address on NIC
1127 static void rge_get_mac_addr(rge_t
*rgep
);
1128 #pragma inline(rge_get_mac_addr)
1131 rge_get_mac_addr(rge_t
*rgep
)
1133 uint8_t *macaddr
= rgep
->netaddr
;
1137 * Read first 4-byte of mac address
1139 val32
= rge_reg_get32(rgep
, ID_0_REG
);
1140 macaddr
[0] = val32
& 0xff;
1142 macaddr
[1] = val32
& 0xff;
1144 macaddr
[2] = val32
& 0xff;
1146 macaddr
[3] = val32
& 0xff;
1149 * Read last 2-byte of mac address
1151 val32
= rge_reg_get32(rgep
, ID_4_REG
);
1152 macaddr
[4] = val32
& 0xff;
1154 macaddr
[5] = val32
& 0xff;
1157 static void rge_set_mac_addr(rge_t
*rgep
);
1158 #pragma inline(rge_set_mac_addr)
1161 rge_set_mac_addr(rge_t
*rgep
)
1163 uint8_t *p
= rgep
->netaddr
;
1167 * Change to config register write enable mode
1169 rge_reg_set8(rgep
, RT_93c46_COMMOND_REG
, RT_93c46_MODE_CONFIG
);
1172 * Get first 4 bytes of mac address
1183 * Set first 4 bytes of mac address
1185 rge_reg_put32(rgep
, ID_0_REG
, val32
);
1188 * Get last 2 bytes of mac address
1195 * Set last 2 bytes of mac address
1197 val32
|= rge_reg_get32(rgep
, ID_4_REG
) & ~0xffff;
1198 rge_reg_put32(rgep
, ID_4_REG
, val32
);
1201 * Return to normal network/host communication mode
1203 rge_reg_clr8(rgep
, RT_93c46_COMMOND_REG
, RT_93c46_MODE_CONFIG
);
1206 static void rge_set_multi_addr(rge_t
*rgep
);
1207 #pragma inline(rge_set_multi_addr)
1210 rge_set_multi_addr(rge_t
*rgep
)
1214 hashp
= (uint32_t *)rgep
->mcast_hash
;
1217 * Change to config register write enable mode
1219 if (rgep
->chipid
.mac_ver
== MAC_VER_8169SC
) {
1220 rge_reg_set8(rgep
, RT_93c46_COMMOND_REG
, RT_93c46_MODE_CONFIG
);
1222 if (rgep
->promisc
) {
1223 rge_reg_put32(rgep
, MULTICAST_0_REG
, ~0U);
1224 rge_reg_put32(rgep
, MULTICAST_4_REG
, ~0U);
1226 rge_reg_put32(rgep
, MULTICAST_0_REG
, RGE_BSWAP_32(hashp
[0]));
1227 rge_reg_put32(rgep
, MULTICAST_4_REG
, RGE_BSWAP_32(hashp
[1]));
1231 * Return to normal network/host communication mode
1233 if (rgep
->chipid
.mac_ver
== MAC_VER_8169SC
) {
1234 rge_reg_clr8(rgep
, RT_93c46_COMMOND_REG
, RT_93c46_MODE_CONFIG
);
1238 static void rge_set_promisc(rge_t
*rgep
);
1239 #pragma inline(rge_set_promisc)
1242 rge_set_promisc(rge_t
*rgep
)
1245 rge_reg_set32(rgep
, RX_CONFIG_REG
, RX_ACCEPT_ALL_PKT
);
1247 rge_reg_clr32(rgep
, RX_CONFIG_REG
, RX_ACCEPT_ALL_PKT
);
1251 * rge_chip_sync() -- program the chip with the unicast MAC address,
1252 * the multicast hash table, the required level of promiscuity, and
1253 * the current loopback mode ...
1255 void rge_chip_sync(rge_t
*rgep
, enum rge_sync_op todo
);
1256 #pragma no_inline(rge_chip_sync)
1259 rge_chip_sync(rge_t
*rgep
, enum rge_sync_op todo
)
1263 rge_get_mac_addr(rgep
);
1266 /* Reprogram the unicast MAC address(es) ... */
1267 rge_set_mac_addr(rgep
);
1270 /* Reprogram the hashed multicast address table ... */
1271 rge_set_multi_addr(rgep
);
1273 case RGE_SET_PROMISC
:
1274 /* Set or clear the PROMISCUOUS mode bit */
1275 rge_set_multi_addr(rgep
);
1276 rge_set_promisc(rgep
);
1283 void rge_chip_blank(void *arg
, time_t ticks
, uint_t count
, int flag
);
1284 #pragma no_inline(rge_chip_blank)
1288 rge_chip_blank(void *arg
, time_t ticks
, uint_t count
, int flag
)
1290 _NOTE(ARGUNUSED(arg
, ticks
, count
));
1293 void rge_tx_trigger(rge_t
*rgep
);
1294 #pragma no_inline(rge_tx_trigger)
1297 rge_tx_trigger(rge_t
*rgep
)
1299 rge_reg_put8(rgep
, TX_RINGS_POLL_REG
, NORMAL_TX_RING_POLL
);
1302 void rge_hw_stats_dump(rge_t
*rgep
);
1303 #pragma no_inline(rge_tx_trigger)
1306 rge_hw_stats_dump(rge_t
*rgep
)
1309 uint32_t regval
= 0;
1311 if (rgep
->rge_mac_state
== RGE_MAC_STOPPED
)
1314 regval
= rge_reg_get32(rgep
, DUMP_COUNTER_REG_0
);
1315 while (regval
& DUMP_START
) {
1317 if (++i
> STATS_DUMP_LOOP
) {
1318 RGE_DEBUG(("rge h/w statistics dump fail!"));
1319 rgep
->rge_chip_state
= RGE_CHIP_ERROR
;
1322 regval
= rge_reg_get32(rgep
, DUMP_COUNTER_REG_0
);
1324 DMA_SYNC(rgep
->dma_area_stats
, DDI_DMA_SYNC_FORKERNEL
);
1327 * Start H/W statistics dump for RTL8169 chip
1329 rge_reg_set32(rgep
, DUMP_COUNTER_REG_0
, DUMP_START
);
1333 * ========== Hardware interrupt handler ==========
1337 #define RGE_DBG RGE_DBG_INT /* debug flag for this code */
1339 static void rge_wake_factotum(rge_t
*rgep
);
1340 #pragma inline(rge_wake_factotum)
1343 rge_wake_factotum(rge_t
*rgep
)
1345 if (rgep
->factotum_flag
== 0) {
1346 rgep
->factotum_flag
= 1;
1347 (void) ddi_intr_trigger_softint(rgep
->factotum_hdl
, NULL
);
1352 * rge_intr() -- handle chip interrupts
1354 uint_t
rge_intr(caddr_t arg1
, caddr_t arg2
);
1355 #pragma no_inline(rge_intr)
1358 rge_intr(caddr_t arg1
, caddr_t arg2
)
1360 rge_t
*rgep
= (rge_t
*)arg1
;
1361 uint16_t int_status
;
1368 boolean_t update_int_mask
= B_FALSE
;
1371 _NOTE(ARGUNUSED(arg2
))
1373 mutex_enter(rgep
->genlock
);
1375 if (rgep
->suspended
) {
1376 mutex_exit(rgep
->genlock
);
1377 return (DDI_INTR_UNCLAIMED
);
1381 * Was this interrupt caused by our device...
1383 int_status
= rge_reg_get16(rgep
, INT_STATUS_REG
);
1384 if (!(int_status
& rgep
->int_mask
)) {
1385 mutex_exit(rgep
->genlock
);
1386 return (DDI_INTR_UNCLAIMED
);
1387 /* indicate it wasn't our interrupt */
1393 * For PCIE chipset, we need disable interrupt first.
1395 if (rgep
->chipid
.is_pcie
) {
1396 rge_reg_put16(rgep
, INT_MASK_REG
, INT_MASK_NONE
);
1397 update_int_mask
= B_TRUE
;
1399 rge_reg_put16(rgep
, INT_STATUS_REG
, int_status
);
1402 * Calculate optimal polling interval
1404 now
= ddi_get_lbolt();
1405 if (now
- rgep
->curr_tick
>= rgep
->tick_delta
&&
1406 (rgep
->param_link_speed
== RGE_SPEED_1000M
||
1407 rgep
->param_link_speed
== RGE_SPEED_100M
)) {
1408 /* number of rx and tx packets in the last tick */
1409 tx_pkts
= rgep
->stats
.opackets
- rgep
->last_opackets
;
1410 rx_pkts
= rgep
->stats
.rpackets
- rgep
->last_rpackets
;
1412 rgep
->last_opackets
= rgep
->stats
.opackets
;
1413 rgep
->last_rpackets
= rgep
->stats
.rpackets
;
1415 /* restore interrupt mask */
1416 rgep
->int_mask
|= TX_OK_INT
| RX_OK_INT
;
1417 if (rgep
->chipid
.is_pcie
) {
1418 rgep
->int_mask
|= NO_TXDESC_INT
;
1421 /* optimal number of packets in a tick */
1422 if (rgep
->param_link_speed
== RGE_SPEED_1000M
) {
1423 opt_pkts
= (1000*1000*1000/8)/ETHERMTU
/CLK_TICK
;
1425 opt_pkts
= (100*1000*1000/8)/ETHERMTU
/CLK_TICK
;
1429 * calculate polling interval based on rx and tx packets
1433 if (now
- rgep
->curr_tick
< 2*rgep
->tick_delta
) {
1434 opt_intrs
= opt_pkts
/TX_COALESC
;
1435 if (tx_pkts
> opt_intrs
) {
1436 poll_rate
= max(tx_pkts
/TX_COALESC
, opt_intrs
);
1437 rgep
->int_mask
&= ~(TX_OK_INT
| NO_TXDESC_INT
);
1440 opt_intrs
= opt_pkts
/RX_COALESC
;
1441 if (rx_pkts
> opt_intrs
) {
1442 opt_intrs
= max(rx_pkts
/RX_COALESC
, opt_intrs
);
1443 poll_rate
= max(opt_intrs
, poll_rate
);
1444 rgep
->int_mask
&= ~RX_OK_INT
;
1446 /* ensure poll_rate reasonable */
1447 poll_rate
= min(poll_rate
, opt_pkts
*4);
1451 /* move to polling mode */
1452 if (rgep
->chipid
.is_pcie
) {
1453 itimer
= (TIMER_CLK_PCIE
/CLK_TICK
)/poll_rate
;
1455 itimer
= (TIMER_CLK_PCI
/CLK_TICK
)/poll_rate
;
1458 /* move to normal mode */
1461 RGE_DEBUG(("%s: poll: itimer:%d int_mask:0x%x",
1462 __func__
, itimer
, rgep
->int_mask
));
1463 rge_reg_put32(rgep
, TIMER_INT_REG
, itimer
);
1465 /* update timestamp for statistics */
1466 rgep
->curr_tick
= now
;
1469 int_status
|= TIME_OUT_INT
;
1471 update_int_mask
= B_TRUE
;
1474 if (int_status
& TIME_OUT_INT
) {
1475 rge_reg_put32(rgep
, TIMER_COUNT_REG
, 0);
1478 /* flush post writes */
1479 (void) rge_reg_get16(rgep
, INT_STATUS_REG
);
1482 * Cable link change interrupt
1484 if (int_status
& LINK_CHANGE_INT
) {
1485 rge_chip_cyclic(rgep
);
1488 if (int_status
& RX_FIFO_OVERFLOW_INT
) {
1489 /* start rx watchdog timeout detection */
1490 rgep
->rx_fifo_ovf
= 1;
1491 if (rgep
->int_mask
& RX_FIFO_OVERFLOW_INT
) {
1492 rgep
->int_mask
&= ~RX_FIFO_OVERFLOW_INT
;
1493 update_int_mask
= B_TRUE
;
1495 } else if (int_status
& RGE_RX_INT
) {
1496 /* stop rx watchdog timeout detection */
1497 rgep
->rx_fifo_ovf
= 0;
1498 if ((rgep
->int_mask
& RX_FIFO_OVERFLOW_INT
) == 0) {
1499 rgep
->int_mask
|= RX_FIFO_OVERFLOW_INT
;
1500 update_int_mask
= B_TRUE
;
1504 mutex_exit(rgep
->genlock
);
1509 if (int_status
& RGE_RX_INT
)
1513 * Transmit interrupt
1515 if (int_status
& TX_ERR_INT
) {
1516 RGE_REPORT((rgep
, "tx error happened, resetting the chip "));
1517 mutex_enter(rgep
->genlock
);
1518 rgep
->rge_chip_state
= RGE_CHIP_ERROR
;
1519 mutex_exit(rgep
->genlock
);
1520 } else if ((rgep
->chipid
.is_pcie
&& (int_status
& NO_TXDESC_INT
)) ||
1521 ((int_status
& TX_OK_INT
) && rgep
->tx_free
< RGE_SEND_SLOTS
/8)) {
1522 (void) ddi_intr_trigger_softint(rgep
->resched_hdl
, NULL
);
1526 * System error interrupt
1528 if (int_status
& SYS_ERR_INT
) {
1529 RGE_REPORT((rgep
, "sys error happened, resetting the chip "));
1530 mutex_enter(rgep
->genlock
);
1531 rgep
->rge_chip_state
= RGE_CHIP_ERROR
;
1532 mutex_exit(rgep
->genlock
);
1536 * Re-enable interrupt for PCIE chipset or install new int_mask
1538 if (update_int_mask
)
1539 rge_reg_put16(rgep
, INT_MASK_REG
, rgep
->int_mask
);
1541 return (DDI_INTR_CLAIMED
); /* indicate it was our interrupt */
1545 * ========== Factotum, implemented as a softint handler ==========
1549 #define RGE_DBG RGE_DBG_FACT /* debug flag for this code */
1551 static boolean_t
rge_factotum_link_check(rge_t
*rgep
);
1552 #pragma no_inline(rge_factotum_link_check)
1555 rge_factotum_link_check(rge_t
*rgep
)
1557 uint8_t media_status
;
1560 media_status
= rge_reg_get8(rgep
, PHY_STATUS_REG
);
1561 link
= (media_status
& PHY_STATUS_LINK_UP
) ?
1562 LINK_STATE_UP
: LINK_STATE_DOWN
;
1563 if (rgep
->param_link_up
!= link
) {
1567 rgep
->param_link_up
= link
;
1569 if (link
== LINK_STATE_UP
) {
1570 if (media_status
& PHY_STATUS_1000MF
) {
1571 rgep
->param_link_speed
= RGE_SPEED_1000M
;
1572 rgep
->param_link_duplex
= LINK_DUPLEX_FULL
;
1574 rgep
->param_link_speed
=
1575 (media_status
& PHY_STATUS_100M
) ?
1576 RGE_SPEED_100M
: RGE_SPEED_10M
;
1577 rgep
->param_link_duplex
=
1578 (media_status
& PHY_STATUS_DUPLEX_FULL
) ?
1579 LINK_DUPLEX_FULL
: LINK_DUPLEX_HALF
;
1588 * Factotum routine to check for Tx stall, using the 'watchdog' counter
1590 static boolean_t
rge_factotum_stall_check(rge_t
*rgep
);
1591 #pragma no_inline(rge_factotum_stall_check)
1594 rge_factotum_stall_check(rge_t
*rgep
)
1598 ASSERT(mutex_owned(rgep
->genlock
));
1601 * Specific check for RX stall ...
1603 rgep
->rx_fifo_ovf
<<= 1;
1604 if (rgep
->rx_fifo_ovf
> rge_rx_watchdog_count
) {
1605 RGE_REPORT((rgep
, "rx_hang detected"));
1610 * Specific check for Tx stall ...
1612 * The 'watchdog' counter is incremented whenever a packet
1613 * is queued, reset to 1 when some (but not all) buffers
1614 * are reclaimed, reset to 0 (disabled) when all buffers
1615 * are reclaimed, and shifted left here. If it exceeds the
1616 * threshold value, the chip is assumed to have stalled and
1617 * is put into the ERROR state. The factotum will then reset
1618 * it on the next pass.
1620 * All of which should ensure that we don't get into a state
1621 * where packets are left pending indefinitely!
1623 if (rgep
->resched_needed
)
1624 (void) ddi_intr_trigger_softint(rgep
->resched_hdl
, NULL
);
1625 dogval
= rge_atomic_shl32(&rgep
->watchdog
, 1);
1626 if (dogval
< rge_watchdog_count
)
1629 RGE_REPORT((rgep
, "Tx stall detected, watchdog code 0x%x", dogval
));
1635 * The factotum is woken up when there's something to do that we'd rather
1636 * not do from inside a hardware interrupt handler or high-level cyclic.
1637 * Its two main tasks are:
1638 * reset & restart the chip after an error
1639 * check the link status whenever necessary
1641 uint_t
rge_chip_factotum(caddr_t arg1
, caddr_t arg2
);
1642 #pragma no_inline(rge_chip_factotum)
1645 rge_chip_factotum(caddr_t arg1
, caddr_t arg2
)
1652 rgep
= (rge_t
*)arg1
;
1653 _NOTE(ARGUNUSED(arg2
))
1655 if (rgep
->factotum_flag
== 0)
1656 return (DDI_INTR_UNCLAIMED
);
1658 rgep
->factotum_flag
= 0;
1659 result
= DDI_INTR_CLAIMED
;
1663 mutex_enter(rgep
->genlock
);
1664 switch (rgep
->rge_chip_state
) {
1668 case RGE_CHIP_RUNNING
:
1669 linkchg
= rge_factotum_link_check(rgep
);
1670 error
= rge_factotum_stall_check(rgep
);
1673 case RGE_CHIP_ERROR
:
1677 case RGE_CHIP_FAULT
:
1679 * Fault detected, time to reset ...
1681 if (rge_autorecover
) {
1682 RGE_REPORT((rgep
, "automatic recovery activated"));
1689 * If an error is detected, stop the chip now, marking it as
1690 * faulty, so that it will be reset next time through ...
1693 rge_chip_stop(rgep
, B_TRUE
);
1694 mutex_exit(rgep
->genlock
);
1697 * If the link state changed, tell the world about it.
1698 * Note: can't do this while still holding the mutex.
1701 mac_link_update(rgep
->mh
, rgep
->param_link_up
);
1707 * High-level cyclic handler
1709 * This routine schedules a (low-level) softint callback to the
1710 * factotum, and prods the chip to update the status block (which
1711 * will cause a hardware interrupt when complete).
1713 void rge_chip_cyclic(void *arg
);
1714 #pragma no_inline(rge_chip_cyclic)
1717 rge_chip_cyclic(void *arg
)
1723 switch (rgep
->rge_chip_state
) {
1727 case RGE_CHIP_RUNNING
:
1728 rge_phy_check(rgep
);
1729 if (rgep
->tx_free
< RGE_SEND_SLOTS
)
1730 rge_send_recycle(rgep
);
1733 case RGE_CHIP_FAULT
:
1734 case RGE_CHIP_ERROR
:
1738 rge_wake_factotum(rgep
);
1743 * ========== Ioctl subfunctions ==========
1747 #define RGE_DBG RGE_DBG_PPIO /* debug flag for this code */
1749 #if RGE_DEBUGGING || RGE_DO_PPIO
1751 static void rge_chip_peek_cfg(rge_t
*rgep
, rge_peekpoke_t
*ppd
);
1752 #pragma no_inline(rge_chip_peek_cfg)
1755 rge_chip_peek_cfg(rge_t
*rgep
, rge_peekpoke_t
*ppd
)
1760 RGE_TRACE(("rge_chip_peek_cfg($%p, $%p)",
1761 (void *)rgep
, (void *)ppd
));
1763 regno
= ppd
->pp_acc_offset
;
1765 switch (ppd
->pp_acc_size
) {
1767 regval
= pci_config_get8(rgep
->cfg_handle
, regno
);
1771 regval
= pci_config_get16(rgep
->cfg_handle
, regno
);
1775 regval
= pci_config_get32(rgep
->cfg_handle
, regno
);
1779 regval
= pci_config_get64(rgep
->cfg_handle
, regno
);
1783 ppd
->pp_acc_data
= regval
;
1786 static void rge_chip_poke_cfg(rge_t
*rgep
, rge_peekpoke_t
*ppd
);
1787 #pragma no_inline(rge_chip_poke_cfg)
1790 rge_chip_poke_cfg(rge_t
*rgep
, rge_peekpoke_t
*ppd
)
1795 RGE_TRACE(("rge_chip_poke_cfg($%p, $%p)",
1796 (void *)rgep
, (void *)ppd
));
1798 regno
= ppd
->pp_acc_offset
;
1799 regval
= ppd
->pp_acc_data
;
1801 switch (ppd
->pp_acc_size
) {
1803 pci_config_put8(rgep
->cfg_handle
, regno
, regval
);
1807 pci_config_put16(rgep
->cfg_handle
, regno
, regval
);
1811 pci_config_put32(rgep
->cfg_handle
, regno
, regval
);
1815 pci_config_put64(rgep
->cfg_handle
, regno
, regval
);
1820 static void rge_chip_peek_reg(rge_t
*rgep
, rge_peekpoke_t
*ppd
);
1821 #pragma no_inline(rge_chip_peek_reg)
1824 rge_chip_peek_reg(rge_t
*rgep
, rge_peekpoke_t
*ppd
)
1829 RGE_TRACE(("rge_chip_peek_reg($%p, $%p)",
1830 (void *)rgep
, (void *)ppd
));
1832 regaddr
= PIO_ADDR(rgep
, ppd
->pp_acc_offset
);
1834 switch (ppd
->pp_acc_size
) {
1836 regval
= ddi_get8(rgep
->io_handle
, regaddr
);
1840 regval
= ddi_get16(rgep
->io_handle
, regaddr
);
1844 regval
= ddi_get32(rgep
->io_handle
, regaddr
);
1848 regval
= ddi_get64(rgep
->io_handle
, regaddr
);
1852 ppd
->pp_acc_data
= regval
;
1855 static void rge_chip_poke_reg(rge_t
*rgep
, rge_peekpoke_t
*ppd
);
1856 #pragma no_inline(rge_chip_peek_reg)
1859 rge_chip_poke_reg(rge_t
*rgep
, rge_peekpoke_t
*ppd
)
1864 RGE_TRACE(("rge_chip_poke_reg($%p, $%p)",
1865 (void *)rgep
, (void *)ppd
));
1867 regaddr
= PIO_ADDR(rgep
, ppd
->pp_acc_offset
);
1868 regval
= ppd
->pp_acc_data
;
1870 switch (ppd
->pp_acc_size
) {
1872 ddi_put8(rgep
->io_handle
, regaddr
, regval
);
1876 ddi_put16(rgep
->io_handle
, regaddr
, regval
);
1880 ddi_put32(rgep
->io_handle
, regaddr
, regval
);
1884 ddi_put64(rgep
->io_handle
, regaddr
, regval
);
1889 static void rge_chip_peek_mii(rge_t
*rgep
, rge_peekpoke_t
*ppd
);
1890 #pragma no_inline(rge_chip_peek_mii)
1893 rge_chip_peek_mii(rge_t
*rgep
, rge_peekpoke_t
*ppd
)
1895 RGE_TRACE(("rge_chip_peek_mii($%p, $%p)",
1896 (void *)rgep
, (void *)ppd
));
1898 ppd
->pp_acc_data
= rge_mii_get16(rgep
, ppd
->pp_acc_offset
/2);
1901 static void rge_chip_poke_mii(rge_t
*rgep
, rge_peekpoke_t
*ppd
);
1902 #pragma no_inline(rge_chip_poke_mii)
1905 rge_chip_poke_mii(rge_t
*rgep
, rge_peekpoke_t
*ppd
)
1907 RGE_TRACE(("rge_chip_poke_mii($%p, $%p)",
1908 (void *)rgep
, (void *)ppd
));
1910 rge_mii_put16(rgep
, ppd
->pp_acc_offset
/2, ppd
->pp_acc_data
);
1913 static void rge_chip_peek_mem(rge_t
*rgep
, rge_peekpoke_t
*ppd
);
1914 #pragma no_inline(rge_chip_peek_mem)
1917 rge_chip_peek_mem(rge_t
*rgep
, rge_peekpoke_t
*ppd
)
1922 RGE_TRACE(("rge_chip_peek_rge($%p, $%p)",
1923 (void *)rgep
, (void *)ppd
));
1925 vaddr
= (void *)(uintptr_t)ppd
->pp_acc_offset
;
1927 switch (ppd
->pp_acc_size
) {
1929 regval
= *(uint8_t *)vaddr
;
1933 regval
= *(uint16_t *)vaddr
;
1937 regval
= *(uint32_t *)vaddr
;
1941 regval
= *(uint64_t *)vaddr
;
1945 RGE_DEBUG(("rge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p",
1946 (void *)rgep
, (void *)ppd
, regval
, vaddr
));
1948 ppd
->pp_acc_data
= regval
;
1951 static void rge_chip_poke_mem(rge_t
*rgep
, rge_peekpoke_t
*ppd
);
1952 #pragma no_inline(rge_chip_poke_mem)
1955 rge_chip_poke_mem(rge_t
*rgep
, rge_peekpoke_t
*ppd
)
1960 RGE_TRACE(("rge_chip_poke_mem($%p, $%p)",
1961 (void *)rgep
, (void *)ppd
));
1963 vaddr
= (void *)(uintptr_t)ppd
->pp_acc_offset
;
1964 regval
= ppd
->pp_acc_data
;
1966 RGE_DEBUG(("rge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p",
1967 (void *)rgep
, (void *)ppd
, regval
, vaddr
));
1969 switch (ppd
->pp_acc_size
) {
1971 *(uint8_t *)vaddr
= (uint8_t)regval
;
1975 *(uint16_t *)vaddr
= (uint16_t)regval
;
1979 *(uint32_t *)vaddr
= (uint32_t)regval
;
1983 *(uint64_t *)vaddr
= (uint64_t)regval
;
1988 static enum ioc_reply
rge_pp_ioctl(rge_t
*rgep
, int cmd
, mblk_t
*mp
,
1989 struct iocblk
*iocp
);
1990 #pragma no_inline(rge_pp_ioctl)
1992 static enum ioc_reply
1993 rge_pp_ioctl(rge_t
*rgep
, int cmd
, mblk_t
*mp
, struct iocblk
*iocp
)
1995 void (*ppfn
)(rge_t
*rgep
, rge_peekpoke_t
*ppd
);
1996 rge_peekpoke_t
*ppd
;
2006 rge_error(rgep
, "rge_pp_ioctl: invalid cmd 0x%x", cmd
);
2019 * Validate format of ioctl
2021 if (iocp
->ioc_count
!= sizeof (rge_peekpoke_t
))
2023 if (mp
->b_cont
== NULL
)
2025 ppd
= (rge_peekpoke_t
*)mp
->b_cont
->b_rptr
;
2028 * Validate request parameters
2030 switch (ppd
->pp_acc_space
) {
2034 case RGE_PP_SPACE_CFG
:
2040 maxoff
= PCI_CONF_HDR_SIZE
;
2041 ppfn
= peek
? rge_chip_peek_cfg
: rge_chip_poke_cfg
;
2044 case RGE_PP_SPACE_REG
:
2046 * Memory-mapped I/O space
2050 maxoff
= RGE_REGISTER_MAX
;
2051 ppfn
= peek
? rge_chip_peek_reg
: rge_chip_poke_reg
;
2054 case RGE_PP_SPACE_MII
:
2056 * PHY's MII registers
2057 * NB: all PHY registers are two bytes, but the
2058 * addresses increment in ones (word addressing).
2059 * So we scale the address here, then undo the
2060 * transformation inside the peek/poke functions.
2062 ppd
->pp_acc_offset
*= 2;
2065 maxoff
= (MII_MAXREG
+1)*2;
2066 ppfn
= peek
? rge_chip_peek_mii
: rge_chip_poke_mii
;
2069 case RGE_PP_SPACE_RGE
:
2071 * RGE data structure!
2074 mem_va
= (uintptr_t)rgep
;
2075 maxoff
= sizeof (*rgep
);
2076 ppfn
= peek
? rge_chip_peek_mem
: rge_chip_poke_mem
;
2079 case RGE_PP_SPACE_STATISTICS
:
2080 case RGE_PP_SPACE_TXDESC
:
2081 case RGE_PP_SPACE_TXBUFF
:
2082 case RGE_PP_SPACE_RXDESC
:
2083 case RGE_PP_SPACE_RXBUFF
:
2087 switch (ppd
->pp_acc_space
) {
2088 case RGE_PP_SPACE_TXDESC
:
2089 areap
= &rgep
->dma_area_txdesc
;
2091 case RGE_PP_SPACE_RXDESC
:
2092 areap
= &rgep
->dma_area_rxdesc
;
2094 case RGE_PP_SPACE_STATISTICS
:
2095 areap
= &rgep
->dma_area_stats
;
2100 mem_va
= (uintptr_t)areap
->mem_va
;
2101 maxoff
= areap
->alength
;
2102 ppfn
= peek
? rge_chip_peek_mem
: rge_chip_poke_mem
;
2106 switch (ppd
->pp_acc_size
) {
2114 if ((ppd
->pp_acc_size
& sizemask
) == 0)
2119 if ((ppd
->pp_acc_offset
% ppd
->pp_acc_size
) != 0)
2122 if (ppd
->pp_acc_offset
>= maxoff
)
2125 if (ppd
->pp_acc_offset
+ppd
->pp_acc_size
> maxoff
)
2129 * All OK - go do it!
2131 ppd
->pp_acc_offset
+= mem_va
;
2133 return (peek
? IOC_REPLY
: IOC_ACK
);
2136 static enum ioc_reply
rge_diag_ioctl(rge_t
*rgep
, int cmd
, mblk_t
*mp
,
2137 struct iocblk
*iocp
);
2138 #pragma no_inline(rge_diag_ioctl)
2140 static enum ioc_reply
2141 rge_diag_ioctl(rge_t
*rgep
, int cmd
, mblk_t
*mp
, struct iocblk
*iocp
)
2143 ASSERT(mutex_owned(rgep
->genlock
));
2148 rge_error(rgep
, "rge_diag_ioctl: invalid cmd 0x%x", cmd
);
2159 return (rge_pp_ioctl(rgep
, cmd
, mp
, iocp
));
2162 return (IOC_RESTART_ACK
);
2164 case RGE_SOFT_RESET
:
2165 case RGE_HARD_RESET
:
2167 * Reset and reinitialise the 570x hardware
2176 #endif /* RGE_DEBUGGING || RGE_DO_PPIO */
2178 static enum ioc_reply
rge_mii_ioctl(rge_t
*rgep
, int cmd
, mblk_t
*mp
,
2179 struct iocblk
*iocp
);
2180 #pragma no_inline(rge_mii_ioctl)
2182 static enum ioc_reply
2183 rge_mii_ioctl(rge_t
*rgep
, int cmd
, mblk_t
*mp
, struct iocblk
*iocp
)
2185 struct rge_mii_rw
*miirwp
;
2188 * Validate format of ioctl
2190 if (iocp
->ioc_count
!= sizeof (struct rge_mii_rw
))
2192 if (mp
->b_cont
== NULL
)
2194 miirwp
= (struct rge_mii_rw
*)mp
->b_cont
->b_rptr
;
2197 * Validate request parameters ...
2199 if (miirwp
->mii_reg
> MII_MAXREG
)
2205 rge_error(rgep
, "rge_mii_ioctl: invalid cmd 0x%x", cmd
);
2209 miirwp
->mii_data
= rge_mii_get16(rgep
, miirwp
->mii_reg
);
2213 rge_mii_put16(rgep
, miirwp
->mii_reg
, miirwp
->mii_data
);
2220 enum ioc_reply
rge_chip_ioctl(rge_t
*rgep
, queue_t
*wq
, mblk_t
*mp
,
2221 struct iocblk
*iocp
);
2222 #pragma no_inline(rge_chip_ioctl)
2225 rge_chip_ioctl(rge_t
*rgep
, queue_t
*wq
, mblk_t
*mp
, struct iocblk
*iocp
)
2229 RGE_TRACE(("rge_chip_ioctl($%p, $%p, $%p, $%p)",
2230 (void *)rgep
, (void *)wq
, (void *)mp
, (void *)iocp
));
2232 ASSERT(mutex_owned(rgep
->genlock
));
2234 cmd
= iocp
->ioc_cmd
;
2238 rge_error(rgep
, "rge_chip_ioctl: invalid cmd 0x%x", cmd
);
2245 case RGE_SOFT_RESET
:
2246 case RGE_HARD_RESET
:
2247 #if RGE_DEBUGGING || RGE_DO_PPIO
2248 return (rge_diag_ioctl(rgep
, cmd
, mp
, iocp
));
2251 #endif /* RGE_DEBUGGING || RGE_DO_PPIO */
2255 return (rge_mii_ioctl(rgep
, cmd
, mp
, iocp
));