/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "rge.h"

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char rge_ident[] = "Realtek 1Gb Ethernet";

/*
 * Used for buffers allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)16,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/*
 * Used for BDs allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)256,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/*
 * PIO access attributes for registers
 * (member values assumed: the conventional little-endian,
 * strictly-ordered settings for this driver family)
 */
static ddi_device_acc_attr_t rge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for descriptors
 */
static ddi_device_acc_attr_t rge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data
 */
static ddi_device_acc_attr_t rge_buf_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static char debug_propname[] = "rge_debug_flags";
static char mtu_propname[] = "default_mtu";
static char msi_propname[] = "msi_enable";

static int	rge_m_start(void *);
static void	rge_m_stop(void *);
static int	rge_m_promisc(void *, boolean_t);
static int	rge_m_multicst(void *, boolean_t, const uint8_t *);
static int	rge_m_unicst(void *, const uint8_t *);
static void	rge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t rge_m_getcapab(void *, mac_capab_t, void *);

#define	RGE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t rge_m_callbacks = {
	RGE_M_CALLBACK_FLAGS,
	rge_m_stat,
	rge_m_start,
	rge_m_stop,
	rge_m_promisc,
	rge_m_multicst,
	rge_m_unicst,
	rge_m_tx,
	NULL,
	rge_m_ioctl,
	rge_m_getcapab
};

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
rge_alloc_dma_mem(rge_t *rgep, size_t memsize, ddi_dma_attr_t *dma_attr_p,
	ddi_device_acc_attr_t *acc_attr_p, uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t vaddr;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(rgep->devinfo, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

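/*
 * Illustrative sketch (not part of the driver): a typical caller
 * passes one of the attribute structures above and a dma_area_t to
 * fill in.  The names mirror real uses later in this file; the
 * 2048-byte size is just an example value.
 *
 *	dma_area_t area;
 *
 *	if (rge_alloc_dma_mem(rgep, 2048, &dma_attr_buf, &rge_buf_accattr,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &area) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	... access via DMA_VPTR(area) / area.cookie.dmac_laddress ...
 *	rge_free_dma_mem(&area);
 */
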
/*
 * Free one allocated area of DMAable memory
 */
static void
rge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
rge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
	uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty * size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}

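/*
 * Illustrative sketch (not part of the driver): carving a ring of
 * buffer descriptors out of one large allocation, as the <init>
 * routines below do.  Each call peels sizeof (rge_bd_t) bytes off
 * the front of <chunk> into <slice> and advances the chunk's
 * virtual address, offset and DMA cookie to match; in practice the
 * slice is stored per-slot rather than overwritten each iteration.
 *
 *	dma_area_t chunk = rgep->dma_area_txdesc;
 *	dma_area_t slice;
 *	uint32_t slot;
 *
 *	for (slot = 0; slot < RGE_SEND_SLOTS; slot++)
 *		rge_slice_chunk(&slice, &chunk, 1, sizeof (rge_bd_t));
 */
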
static int
rge_alloc_bufs(rge_t *rgep)
{
	size_t txdescsize;
	size_t rxdescsize;
	int err;

	/*
	 * Allocate memory & handle for packet statistics
	 */
	err = rge_alloc_dma_mem(rgep,
	    RGE_STATS_DUMP_SIZE,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_stats);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);
	rgep->hw_stats = DMA_VPTR(rgep->dma_area_stats);

	/*
	 * Allocate memory & handle for Tx descriptor ring
	 */
	txdescsize = RGE_SEND_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    txdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_txdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handle for Rx descriptor ring
	 */
	rxdescsize = RGE_RECV_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    rxdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_rxdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}

/*
 * rge_free_bufs() -- free descriptors/buffers allocated for this
 * device instance.
 */
static void
rge_free_bufs(rge_t *rgep)
{
	rge_free_dma_mem(&rgep->dma_area_stats);
	rge_free_dma_mem(&rgep->dma_area_txdesc);
	rge_free_dma_mem(&rgep->dma_area_rxdesc);
}

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the rx/tx rings to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
rge_reinit_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	rge_bd_t *bdp;
	uint32_t slot;

	/*
	 * re-init send ring
	 */
	DMA_ZERO(rgep->tx_desc);
	ssbdp = rgep->sw_sbds;
	bdp = rgep->tx_ring;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		bdp->host_buf_addr =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress >> 32);
		/* last BD in Tx ring */
		if (slot == (RGE_SEND_SLOTS - 1))
			bdp->flags_len = RGE_BSWAP_32(BD_FLAG_EOR);
		ssbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);

	rgep->tx_free = RGE_SEND_SLOTS;
}

static void
rge_reinit_recv_ring(rge_t *rgep)
{
	rge_bd_t *bdp;
	sw_rbd_t *srbdp;
	dma_area_t *pbuf;
	uint32_t slot;

	/*
	 * re-init receive ring
	 */
	DMA_ZERO(rgep->rx_desc);
	srbdp = rgep->sw_rbds;
	bdp = rgep->rx_ring;
	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		pbuf = &srbdp->rx_buf->pbuf;
		bdp->host_buf_addr =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress + rgep->head_room);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
		bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
		    (rgep->rxbuf_size - rgep->head_room));
		/* last BD in Rx ring */
		if (slot == (RGE_RECV_SLOTS - 1))
			bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
		srbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
}

static void
rge_reinit_buf_ring(rge_t *rgep)
{

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		return;

	/*
	 * If not all of the buffers sent upstream have been returned
	 * to the driver yet, use bcopy() only in the rx process.
	 */
	if (rgep->rx_free != RGE_BUF_SLOTS)
		rgep->rx_bcopy = B_TRUE;
}

static void
rge_reinit_rings(rge_t *rgep)
{
	rge_reinit_send_ring(rgep);
	rge_reinit_recv_ring(rgep);
	rge_reinit_buf_ring(rgep);
}

static void
rge_fini_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	uint32_t slot;

	ssbdp = rgep->sw_sbds;
	for (slot = 0; slot < RGE_SEND_SLOTS; ++slot) {
		rge_free_dma_mem(&ssbdp->pbuf);
		ssbdp++;
	}

	kmem_free(rgep->sw_sbds, RGE_SEND_SLOTS * sizeof (sw_sbd_t));
	rgep->sw_sbds = NULL;
}

static void
rge_fini_recv_ring(rge_t *rgep)
{
	sw_rbd_t *srbdp;
	uint32_t slot;

	srbdp = rgep->sw_rbds;
	for (slot = 0; slot < RGE_RECV_SLOTS; ++srbdp, ++slot) {
		if (srbdp->rx_buf != NULL) {
			if (srbdp->rx_buf->mp != NULL) {
				freemsg(srbdp->rx_buf->mp);
				srbdp->rx_buf->mp = NULL;
			}
			rge_free_dma_mem(&srbdp->rx_buf->pbuf);
			kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
			srbdp->rx_buf = NULL;
		}
	}

	kmem_free(rgep->sw_rbds, RGE_RECV_SLOTS * sizeof (sw_rbd_t));
	rgep->sw_rbds = NULL;
}

static void
rge_fini_buf_ring(rge_t *rgep)
{
	sw_rbd_t *srbdp;
	uint32_t slot;

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		return;

	ASSERT(rgep->rx_free == RGE_BUF_SLOTS);

	srbdp = rgep->free_srbds;
	for (slot = 0; slot < RGE_BUF_SLOTS; ++srbdp, ++slot) {
		if (srbdp->rx_buf != NULL) {
			if (srbdp->rx_buf->mp != NULL) {
				freemsg(srbdp->rx_buf->mp);
				srbdp->rx_buf->mp = NULL;
			}
			rge_free_dma_mem(&srbdp->rx_buf->pbuf);
			kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
			srbdp->rx_buf = NULL;
		}
	}

	kmem_free(rgep->free_srbds, RGE_BUF_SLOTS * sizeof (sw_rbd_t));
	rgep->free_srbds = NULL;
}

static void
rge_fini_rings(rge_t *rgep)
{
	rge_fini_send_ring(rgep);
	rge_fini_recv_ring(rgep);
	rge_fini_buf_ring(rgep);
}

static int
rge_init_send_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_sbd_t *ssbdp;
	dma_area_t desc;
	dma_area_t *pbuf;
	int err;

	/*
	 * Allocate the array of s/w Tx Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(RGE_SEND_SLOTS*sizeof (*ssbdp), KM_SLEEP);
	rgep->sw_sbds = ssbdp;

	/*
	 * Init send ring
	 */
	rgep->tx_desc = rgep->dma_area_txdesc;
	DMA_ZERO(rgep->tx_desc);
	rgep->tx_ring = rgep->tx_desc.mem_va;

	desc = rgep->tx_desc;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		rge_slice_chunk(&ssbdp->desc, &desc, 1, sizeof (rge_bd_t));

		/*
		 * Allocate memory & handle for Tx buffers
		 */
		pbuf = &ssbdp->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->txbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_problem(rgep,
			    "rge_init_send_ring: alloc tx buffer failed");
			rge_fini_send_ring(rgep);
			return (DDI_FAILURE);
		}
		ssbdp++;
	}
	ASSERT(desc.alength == 0);

	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}

static int
rge_init_recv_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_rbd_t *srbdp;
	dma_buf_t *rx_buf;
	dma_area_t *pbuf;
	int err;

	/*
	 * Allocate the array of s/w Rx Buffer Descriptors
	 */
	srbdp = kmem_zalloc(RGE_RECV_SLOTS*sizeof (*srbdp), KM_SLEEP);
	rgep->sw_rbds = srbdp;

	/*
	 * Init receive ring
	 */
	rgep->rx_desc = rgep->dma_area_rxdesc;
	DMA_ZERO(rgep->rx_desc);
	rgep->rx_ring = rgep->rx_desc.mem_va;

	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		srbdp->rx_buf = rx_buf =
		    kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

		/*
		 * Allocate memory & handle for Rx buffers
		 */
		pbuf = &rx_buf->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_fini_recv_ring(rgep);
			rge_problem(rgep,
			    "rge_init_recv_ring: alloc rx buffer failed");
			return (DDI_FAILURE);
		}

		pbuf->alength -= rgep->head_room;
		pbuf->offset += rgep->head_room;
		if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)) {
			rx_buf->rx_recycle.free_func = rge_rx_recycle;
			rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
			rx_buf->private = (caddr_t)rgep;
			rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
			    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
			if (rx_buf->mp == NULL) {
				rge_fini_recv_ring(rgep);
				rge_problem(rgep,
				    "rge_init_recv_ring: desballoc() failed");
				return (DDI_FAILURE);
			}
		}
		srbdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}

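/*
 * Note on the zero-copy path above: desballoc(9F) wraps an existing
 * data buffer in an mblk without copying.  When the last reference
 * to the message is freed upstream, STREAMS invokes the registered
 * free routine, which is how rge_rx_recycle() gets each buffer back.
 * A minimal sketch of the mechanism:
 *
 *	frtn_t frtn;
 *
 *	frtn.free_func = rge_rx_recycle;	(* void (*)(caddr_t) *)
 *	frtn.free_arg = (caddr_t)rx_buf;
 *	mp = desballoc(DMA_VPTR(rx_buf->pbuf), bufsize, 0, &frtn);
 */
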
static int
rge_init_buf_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_rbd_t *free_srbdp;
	dma_buf_t *rx_buf;
	dma_area_t *pbuf;
	int err;

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) {
		rgep->rx_bcopy = B_TRUE;
		return (DDI_SUCCESS);
	}

	/*
	 * Allocate the array of s/w free Buffer Descriptors
	 */
	free_srbdp = kmem_zalloc(RGE_BUF_SLOTS*sizeof (*free_srbdp), KM_SLEEP);
	rgep->free_srbds = free_srbdp;

	/*
	 * Init free buffer ring
	 */
	rgep->rx_bcopy = B_FALSE;
	rgep->rx_free = RGE_BUF_SLOTS;
	for (slot = 0; slot < RGE_BUF_SLOTS; slot++) {
		free_srbdp->rx_buf = rx_buf =
		    kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

		/*
		 * Allocate memory & handle for free Rx buffers
		 */
		pbuf = &rx_buf->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_fini_buf_ring(rgep);
			rge_problem(rgep,
			    "rge_init_buf_ring: alloc rx free buffer failed");
			return (DDI_FAILURE);
		}
		pbuf->alength -= rgep->head_room;
		pbuf->offset += rgep->head_room;
		rx_buf->rx_recycle.free_func = rge_rx_recycle;
		rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
		rx_buf->private = (caddr_t)rgep;
		rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
		    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
		if (rx_buf->mp == NULL) {
			rge_fini_buf_ring(rgep);
			rge_problem(rgep,
			    "rge_init_buf_ring: desballoc() failed");
			return (DDI_FAILURE);
		}
		free_srbdp++;
	}
	return (DDI_SUCCESS);
}

static int
rge_init_rings(rge_t *rgep)
{
	int err;

	err = rge_init_send_ring(rgep);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	err = rge_init_recv_ring(rgep);
	if (err != DDI_SUCCESS) {
		rge_fini_send_ring(rgep);
		return (DDI_FAILURE);
	}

	err = rge_init_buf_ring(rgep);
	if (err != DDI_SUCCESS) {
		rge_fini_send_ring(rgep);
		rge_fini_recv_ring(rgep);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the
 * MAC state so they can be called internally without disturbing
 * our record of what NEMO thinks we should be doing ...
 */

/*
 * rge_reset() -- reset h/w & rings to initial state
 */
static void
rge_reset(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	mutex_enter(rgep->rx_lock);
	mutex_enter(rgep->rc_lock);
	rw_enter(rgep->errlock, RW_WRITER);

	(void) rge_chip_reset(rgep);
	rge_reinit_rings(rgep);

	rw_exit(rgep->errlock);
	mutex_exit(rgep->rc_lock);
	mutex_exit(rgep->rx_lock);

	rgep->stats.rpackets = 0;
	rgep->stats.rbytes = 0;
	rgep->stats.opackets = 0;
	rgep->stats.obytes = 0;
	rgep->stats.tx_pre_ismax = B_FALSE;
	rgep->stats.tx_cur_ismax = B_FALSE;

	RGE_DEBUG(("rge_reset($%p) done", (void *)rgep));
}

/*
 * rge_stop() -- stop processing, don't reset h/w or rings
 */
static void
rge_stop(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	rge_chip_stop(rgep, B_FALSE);

	RGE_DEBUG(("rge_stop($%p) done", (void *)rgep));
}

/*
 * rge_start() -- start transmitting/receiving
 */
static void
rge_start(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	rge_chip_start(rgep);
}

/*
 * rge_restart - restart transmitting/receiving after error or suspend
 */
void
rge_restart(rge_t *rgep)
{
	uint32_t i;

	ASSERT(mutex_owned(rgep->genlock));
	/*
	 * Wait for posted buffer to be freed...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_restart: waiting for rx buf free..."));
		}
	}
	rge_reset(rgep);
	rgep->stats.chip_reset++;
	if (rgep->rge_mac_state == RGE_MAC_STARTED) {
		rge_start(rgep);
		rgep->resched_needed = B_TRUE;
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	}
}

/*
 * ========== Nemo-required management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * rge_m_stop() -- stop transmitting/receiving
 */
static void
rge_m_stop(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/
	uint32_t i;

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		ASSERT(rgep->rge_mac_state == RGE_MAC_STOPPED);
		mutex_exit(rgep->genlock);
		return;
	}
	rge_stop(rgep);
	/*
	 * Wait for posted buffer to be freed...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_m_stop: waiting for rx buf free..."));
		}
	}
	rgep->rge_mac_state = RGE_MAC_STOPPED;
	RGE_DEBUG(("rge_m_stop($%p) done", arg));
	mutex_exit(rgep->genlock);
}

/*
 * rge_m_start() -- start transmitting/receiving
 */
static int
rge_m_start(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/

	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_FAILURE);
	}
	/*
	 * Clear hw/sw statistics
	 */
	DMA_ZERO(rgep->dma_area_stats);
	bzero(&rgep->stats, sizeof (rge_stats_t));

	/*
	 * Start processing and record new MAC state
	 */
	rge_reset(rgep);
	rge_start(rgep);
	rgep->rge_mac_state = RGE_MAC_STARTED;
	RGE_DEBUG(("rge_m_start($%p) done", arg));

	mutex_exit(rgep->genlock);

	return (0);
}

/*
 * rge_m_unicst_set() -- set the physical network address
 */
static int
rge_m_unicst(void *arg, const uint8_t *macaddr)
{
	rge_t *rgep = arg;		/* private device info	*/

	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(rgep->genlock);
	bcopy(macaddr, rgep->netaddr, ETHERADDRL);

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rge_chip_sync(rgep, RGE_SET_MAC);
	mutex_exit(rgep->genlock);

	return (0);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 */
static uint32_t
rge_hash_index(const uint8_t *mca)
{
	uint32_t crc = (uint32_t)RGE_HASH_CRC;
	uint32_t const POLY = RGE_HASH_POLY;
	uint32_t msb;
	int bytes;
	uchar_t currentbyte;
	uint32_t index;
	int bit;

	for (bytes = 0; bytes < ETHERADDRL; bytes++) {
		currentbyte = mca[bytes];
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1))
				crc ^= POLY;
			currentbyte >>= 1;
		}
	}
	index = crc >> 26;
	/* the index value is between 0 and 63(0x3f) */

	return (index);
}

/*
 * rge_m_multicst_add() -- enable/disable a multicast address
 */
static int
rge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	rge_t *rgep = arg;		/* private device info	*/
	struct ether_addr *addr;
	uint32_t index;
	uint32_t reg;
	uint8_t *hashp;

	mutex_enter(rgep->genlock);
	hashp = rgep->mcast_hash;
	addr = (struct ether_addr *)mca;
	/*
	 * Calculate the Multicast address hash index value
	 *	Normally, the position of MAR0-MAR7 is
	 *	MAR0: offset 0x08, ..., MAR7: offset 0x0F.
	 *
	 *	For pcie chipset, the position of MAR0-MAR7 is
	 *	different from others:
	 *	MAR0: offset 0x0F, ..., MAR7: offset 0x08.
	 */
	index = rge_hash_index(addr->ether_addr_octet);
	if (rgep->chipid.is_pcie)
		reg = (~(index / RGE_MCAST_NUM)) & 0x7;
	else
		reg = index / RGE_MCAST_NUM;

	if (add) {
		if (rgep->mcast_refs[index]++) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[reg] |= 1 << (index % RGE_MCAST_NUM);
	} else {
		if (--rgep->mcast_refs[index]) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[reg] &= ~(1 << (index % RGE_MCAST_NUM));
	}

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	/*
	 * Set multicast register
	 */
	rge_chip_sync(rgep, RGE_SET_MUL);

	mutex_exit(rgep->genlock);
	return (0);
}

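/*
 * Worked example (illustrative): with RGE_MCAST_NUM == 8 (eight MAR
 * registers of eight bits each), a hash index of 37 selects bit
 * 37 % 8 == 5 in register 37 / 8 == 4 (MAR4) on a conventional chip;
 * on a PCIe chip the register order is reversed, so the same index
 * uses (~4) & 0x7 == 3 instead.
 */
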
/*
 * rge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
rge_m_promisc(void *arg, boolean_t on)
{
	rge_t *rgep = arg;

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(rgep->genlock);

	if (rgep->promisc == on) {
		mutex_exit(rgep->genlock);
		return (0);
	}
	rgep->promisc = on;

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rge_chip_sync(rgep, RGE_SET_PROMISC);
	RGE_DEBUG(("rge_m_promisc_set($%p) done", arg));
	mutex_exit(rgep->genlock);

	return (0);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	RGE_LOOP_NONE		},
	{ internal,	"PHY",		RGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		RGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
rge_set_loop_mode(rge_t *rgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == rgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case RGE_LOOP_NONE:
	case RGE_LOOP_INTERNAL_PHY:
	case RGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	rgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
rge_loop_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = rgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (rge_set_loop_mode(rgep, *lbmp));
	}
}

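/*
 * Illustrative userland sketch (assumed usage, not part of the
 * driver): loopback ioctls like the ones above are typically issued
 * through an I_STR STREAMS ioctl on an open descriptor for the rge
 * device node, e.g.
 *
 *	uint32_t mode = 0;
 *	struct strioctl sioc;
 *
 *	sioc.ic_cmd = LB_GET_MODE;
 *	sioc.ic_timout = -1;
 *	sioc.ic_len = sizeof (mode);
 *	sioc.ic_dp = (char *)&mode;
 *	(void) ioctl(fd, I_STR, &sioc);
 */
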
/*
 * Specific rge IOCTLs, the MAC layer handles the generic ones.
 */
static void
rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	rge_t *rgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesirable
	 * state.  So just NAK it.
	 */
	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(rgep->genlock);
		return;
	}
	mutex_exit(rgep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(rgep->genlock);

	switch (cmd) {
	default:
		status = IOC_INVAL;
		break;

	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		status = rge_chip_ioctl(rgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = rge_loop_ioctl(rgep, wq, mp, iocp);
		break;

	case ND_GET:
	case ND_SET:
		status = rge_nd_ioctl(rgep, wq, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		rge_phy_update(rgep);
		break;
	default:
		break;
	}

	mutex_exit(rgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
	case IOC_RESTART_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_REPLY:
	case IOC_RESTART_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static boolean_t
rge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	rge_t *rgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;

		switch (rgep->chipid.mac_ver) {
		case MAC_VER_8169S_D:
		case MAC_VER_8169S_E:
		case MAC_VER_8169SB:
		case MAC_VER_8169SC:
		case MAC_VER_8168B_B:
		case MAC_VER_8168B_C:
			*hcksum_txflags = HCKSUM_INET_FULL_V4 |
			    HCKSUM_IPHDRCKSUM;
			break;
		case MAC_VER_8101E_B:
		case MAC_VER_8101E_C:
		default:
			*hcksum_txflags = 0;
			break;
		}
		break;
	}
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * ============ Init MSI/Fixed Interrupt routines ==============
 */

/*
 * rge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
rge_add_intrs(rge_t *rgep, int intr_type)
{
	dev_info_t *dip = rgep->devinfo;
	int avail;
	int actual;
	int intr_size;
	int count;
	int i, j;
	int ret;

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		rge_error(rgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		rge_error(rgep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	rgep->htable = kmem_alloc(intr_size, KM_SLEEP);
	rgep->intr_rqst = count;

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, rgep->htable, intr_type, 0,
	    count, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS || actual == 0) {
		rge_error(rgep, "ddi_intr_alloc() failed %d\n", ret);
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}
	if (actual < count) {
		rge_log(rgep, "ddi_intr_alloc() Requested: %d, Received: %d\n",
		    count, actual);
	}
	rgep->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(rgep->htable[0], &rgep->intr_pri)) !=
	    DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_pri() failed %d\n", ret);
		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(rgep->htable[i]);
		}
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Test for high level mutex */
	if (rgep->intr_pri >= ddi_intr_get_hilevel_pri()) {
		rge_error(rgep, "rge_add_intrs:"
		    "Hi level interrupt not supported");
		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(rgep->htable[i]);
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(rgep->htable[i], rge_intr,
		    (caddr_t)rgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			rge_error(rgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);
			/* Remove already added intr */
			for (j = 0; j < i; j++)
				(void) ddi_intr_remove_handler(rgep->htable[j]);
			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(rgep->htable[i]);
			}
			kmem_free(rgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(rgep->htable[0], &rgep->intr_cap))
	    != DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_cap() failed %d\n", ret);
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(rgep->htable[i]);
			(void) ddi_intr_free(rgep->htable[i]);
		}
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * rge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
rge_rem_intrs(rge_t *rgep)
{
	int i;

	/* Disable all interrupts */
	if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(rgep->htable, rgep->intr_cnt);
	} else {
		for (i = 0; i < rgep->intr_cnt; i++) {
			(void) ddi_intr_disable(rgep->htable[i]);
		}
	}

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < rgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(rgep->htable[i]);
		(void) ddi_intr_free(rgep->htable[i]);
	}

	kmem_free(rgep->htable, rgep->intr_rqst * sizeof (ddi_intr_handle_t));
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/

static void
rge_unattach(rge_t *rgep)
{
	/*
	 * Flag that no more activity may be initiated
	 */
	rgep->progress &= ~PROGRESS_READY;
	rgep->rge_mac_state = RGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all RGE data structures
	 */
	if (rgep->periodic_id != NULL) {
		ddi_periodic_delete(rgep->periodic_id);
		rgep->periodic_id = NULL;
	}

	if (rgep->progress & PROGRESS_KSTATS)
		rge_fini_kstats(rgep);

	if (rgep->progress & PROGRESS_PHY)
		(void) rge_phy_reset(rgep);

	if (rgep->progress & PROGRESS_INIT) {
		mutex_enter(rgep->genlock);
		(void) rge_chip_reset(rgep);
		mutex_exit(rgep->genlock);
		rge_fini_rings(rgep);
	}

	if (rgep->progress & PROGRESS_INTR) {
		rge_rem_intrs(rgep);
		mutex_destroy(rgep->rc_lock);
		mutex_destroy(rgep->rx_lock);
		mutex_destroy(rgep->tc_lock);
		mutex_destroy(rgep->tx_lock);
		rw_destroy(rgep->errlock);
		mutex_destroy(rgep->genlock);
	}

	if (rgep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(rgep->factotum_hdl);

	if (rgep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(rgep->resched_hdl);

	if (rgep->progress & PROGRESS_NDD)
		rge_nd_cleanup(rgep);

	rge_free_bufs(rgep);

	if (rgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&rgep->io_handle);

	if (rgep->progress & PROGRESS_CFG)
		pci_config_teardown(&rgep->cfg_handle);

	ddi_remove_minor_node(rgep->devinfo, NULL);
	kmem_free(rgep, sizeof (*rgep));
}

static int
rge_resume(dev_info_t *devinfo)
{
	rge_t *rgep;			/* Our private data	*/
	chip_id_t *cidp;
	chip_id_t chipid;

	rgep = ddi_get_driver_private(devinfo);

	/*
	 * If there are state inconsistencies, this is bad.  Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (rgep == NULL)
		cmn_err(CE_PANIC,
		    "rge: rgep returned from ddi_get_driver_private was NULL");

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (rgep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "rge: passed devinfo not the same as saved devinfo");

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
	 */
	cidp = &rgep->chipid;
	rge_chip_cfg_init(rgep, &chipid);
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);

	mutex_enter(rgep->genlock);

	/*
	 * This branch is taken in only one case: the port hasn't
	 * been plumbed.
	 */
	if (rgep->suspended == B_FALSE) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}
	rgep->rge_mac_state = RGE_MAC_STARTED;
	/*
	 * All OK, reinitialise h/w & kick off NEMO scheduling
	 */
	rge_restart(rgep);
	rgep->suspended = B_FALSE;

	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	rge_t *rgep;			/* Our private data	*/
	mac_register_t *macp;
	chip_id_t *cidp;
	int intr_types;
	caddr_t regs;
	int instance;
	int i;
	int err;

	/*
	 * we don't support high level interrupts in the driver
	 */
	if (ddi_intr_hilevel(devinfo, 0) != 0) {
		cmn_err(CE_WARN,
		    "rge_attach -- unsupported high level interrupt");
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devinfo);
	RGE_GTRACE(("rge_attach($%p, %d) instance %d",
	    (void *)devinfo, cmd, instance));
	RGE_BRKPT(NULL, "rge_attach");

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (rge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	rgep = kmem_zalloc(sizeof (*rgep), KM_SLEEP);
	ddi_set_driver_private(devinfo, rgep);
	rgep->devinfo = devinfo;

	/*
	 * Initialize more fields in RGE private data
	 */
	rgep->rge_mac_state = RGE_MAC_ATTACH;
	rgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, rge_debug);
	rgep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, mtu_propname, ETHERMTU);
	rgep->msi_enable = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, msi_propname, B_TRUE);
	(void) snprintf(rgep->ifname, sizeof (rgep->ifname), "%s%d",
	    RGE_DRIVER_NAME, instance);

	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 */
	err = pci_config_setup(devinfo, &rgep->cfg_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_CFG;
	cidp = &rgep->chipid;
	bzero(cidp, sizeof (*cidp));
	rge_chip_cfg_init(rgep, cidp);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, 2, &regs,
	    0, 0, &rge_reg_accattr, &rgep->io_handle);

	/*
	 * MMIO mapping will fail if the assigned address is beyond 4G,
	 * in which case fall back to the I/O mapping
	 */
	if (err != DDI_SUCCESS) {
		err = ddi_regs_map_setup(devinfo, 1, &regs,
		    0, 0, &rge_reg_accattr, &rgep->io_handle);
	}
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	rgep->io_regs = regs;
	rgep->progress |= PROGRESS_REGS;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	rge_chip_ident(rgep);
	err = rge_alloc_bufs(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "DMA buffer allocation failed");
		goto attach_fail;
	}

	/*
	 * Register NDD-tweakable parameters
	 */
	if (rge_nd_init(rgep)) {
		rge_problem(rgep, "rge_nd_init() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_NDD;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_intr_add_softint(devinfo, &rgep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, rge_reschedule, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_intr_add_softint() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(devinfo, &rgep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, rge_chip_factotum, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_intr_add_softint() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_FACTOTUM;

	/*
	 * Get supported interrupt types
	 */
	if (ddi_intr_get_supported_types(devinfo, &intr_types)
	    != DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_supported_types failed\n");
		goto attach_fail;
	}

	/*
	 * Add the h/w interrupt handler and initialise mutexes
	 * RTL8101E is observed to have MSI invalidation issue after S/R.
	 * So the FIXED interrupt is used instead.
	 */
	if (rgep->chipid.mac_ver == MAC_VER_8101E)
		rgep->msi_enable = B_FALSE;
	if ((intr_types & DDI_INTR_TYPE_MSI) && rgep->msi_enable) {
		if (rge_add_intrs(rgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			rge_error(rgep, "MSI registration failed, "
			    "trying FIXED interrupt type\n");
		} else {
			rge_log(rgep, "Using MSI interrupt type\n");
			rgep->intr_type = DDI_INTR_TYPE_MSI;
			rgep->progress |= PROGRESS_INTR;
		}
	}
	if (!(rgep->progress & PROGRESS_INTR) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (rge_add_intrs(rgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			rge_error(rgep, "FIXED interrupt "
			    "registration failed\n");
		} else {
			rge_log(rgep, "Using FIXED interrupt type\n");
			rgep->intr_type = DDI_INTR_TYPE_FIXED;
			rgep->progress |= PROGRESS_INTR;
		}
	}
	if (!(rgep->progress & PROGRESS_INTR)) {
		rge_error(rgep, "No interrupts registered\n");
		goto attach_fail;
	}
	mutex_init(rgep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	rw_init(rgep->errlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->rc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));

	/*
	 * Initialize rings
	 */
	err = rge_init_rings(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "rge_init_rings() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_INIT;

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(rgep->htable, rgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < rgep->intr_cnt; i++) {
			(void) ddi_intr_enable(rgep->htable[i]);
		}
	}

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	rgep->param_link_up = LINK_STATE_UNKNOWN;

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
	 */
	mutex_enter(rgep->genlock);
	(void) rge_chip_reset(rgep);
	rge_chip_sync(rgep, RGE_GET_MAC);
	bzero(rgep->mcast_hash, sizeof (rgep->mcast_hash));
	bzero(rgep->mcast_refs, sizeof (rgep->mcast_refs));
	rgep->promisc = B_FALSE;
	rgep->param_loop_mode = RGE_LOOP_NONE;
	mutex_exit(rgep->genlock);
	rge_phy_init(rgep);
	rgep->progress |= PROGRESS_PHY;

	/*
	 * Create & initialise named kstats
	 */
	rge_init_kstats(rgep, instance);
	rgep->progress |= PROGRESS_KSTATS;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = rgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = rgep->netaddr;
	macp->m_callbacks = &rge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = rgep->default_mtu;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &rgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodical handler.
	 * rge_chip_cyclic() is invoked in kernel context.
	 */
	rgep->periodic_id = ddi_periodic_add(rge_chip_cyclic, rgep,
	    RGE_CYCLIC_PERIOD, DDI_IPL_0);

	rgep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	rge_unattach(rgep);
	return (DDI_FAILURE);
}

/*
 * rge_suspend() -- suspend transmit/receive for powerdown
 */
static int
rge_suspend(rge_t *rgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(rgep->genlock);
	rw_enter(rgep->errlock, RW_WRITER);

	if (rgep->rge_mac_state != RGE_MAC_STARTED) {
		rw_exit(rgep->errlock);
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rgep->suspended = B_TRUE;
	rge_stop(rgep);
	rgep->rge_mac_state = RGE_MAC_STOPPED;

	rw_exit(rgep->errlock);
	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
rge_quiesce(dev_info_t *devinfo)
{
	rge_t *rgep = ddi_get_driver_private(devinfo);

	if (rgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Turn off debugging
	 */
	rge_debug = 0;
	rgep->debug = 0;

	rge_chip_stop(rgep, B_FALSE);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
rge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	rge_t *rgep;

	RGE_GTRACE(("rge_detach($%p, %d)", (void *)devinfo, cmd));

	rgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (rge_suspend(rgep));

	case DDI_DETACH:
		break;
	}

	/*
	 * If any buffers are still posted upstream, the driver must
	 * refuse to detach; the upper layers need to release them first.
	 */
	if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) &&
	    rgep->rx_free != RGE_BUF_SLOTS)
		return (DDI_FAILURE);

	/*
	 * Unregister from the MAC layer subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(rgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	rge_unattach(rgep);
	return (DDI_SUCCESS);
}

/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/

DDI_DEFINE_STREAM_OPS(rge_dev_ops, nulldev, nulldev, rge_attach, rge_detach,
    nodev, NULL, D_MP, NULL, rge_quiesce);

static struct modldrv rge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	rge_ident,		/* short description */
	&rge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&rge_modldrv, NULL
};

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&rge_dev_ops, "rge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(rge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&rge_dev_ops);

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&rge_dev_ops);
		mutex_destroy(rge_log_mutex);
	}
	return (status);
}