/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/policy.h>

#include "dmfe_impl.h"
/*
 * This is the string displayed by modinfo, etc.
 */
static char dmfe_ident[] = "Davicom DM9102 Ethernet";
/*
 * DMFE_PCI_RNUMBER is the register-set number to use for the operating
 * registers.  On an OBP-based machine, regset 0 refers to CONFIG space,
 * regset 1 will be the operating registers in I/O space, and regset 2
 * will be the operating registers in MEMORY space (preferred).  If an
 * expansion ROM is fitted, it may appear as a further register set.
 *
 * DMFE_SLOP defines the amount by which the chip may read beyond
 * the end of a buffer or descriptor, apparently 6-8 dwords :(
 * We have to make sure this doesn't cause it to access unallocated
 * or unmapped memory.
 *
 * DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
 * rounded up to a multiple of 4.  Here we choose a power of two for
 * speed & simplicity at the cost of a bit more memory.
 *
 * However, the buffer length field in the TX/RX descriptors is only
 * eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
 * per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
 * (2000) bytes each.
 *
 * DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
 * the data buffers.  The descriptors are always set up in CONSISTENT
 * mode.
 *
 * DMFE_HEADROOM defines how much space we'll leave in allocated
 * mblks before the first valid data byte.  This should be chosen
 * to be 2 modulo 4, so that once the ethernet header (14 bytes)
 * has been stripped off, the packet data will be 4-byte aligned.
 * The remaining space can be used by upstream modules to prepend
 * any headers required.
 */
/*
 * dmfe_bus_modes: the bus mode bits to be put into CSR0.
 *	Setting READ_MULTIPLE in this register seems to cause
 *	the chip to generate a READ LINE command with a parity
 *	error!
 *
 * dmfe_setup_desc1: the value to be put into descriptor word 1
 *	when sending a SETUP packet.
 *
 *	Setting TX_LAST_DESC in desc1 in a setup packet seems
 *	to make the chip spontaneously reset internally - it
 *	attempts to give back the setup packet descriptor by
 *	writing to PCI address 00000000 - which may or may not
 *	get a MASTER ABORT - after which most of its registers
 *	seem to have either default values or garbage!
 *
 *	TX_FIRST_DESC doesn't seem to have the same effect, but
 *	it isn't needed on a setup packet so we'll leave it out
 *	too, just in case it has some other weird side-effect.
 *
 *	The default hardware packet filtering mode is now
 *	HASH_AND_PERFECT (imperfect filtering of multicast
 *	packets and perfect filtering of unicast packets).
 *	If this is found not to work reliably, setting the
 *	TX_FILTER_TYPE1 bit will cause a switchover to using
 *	HASH_ONLY mode (imperfect filtering of *all* packets).
 *	Software will then perform the additional filtering
 *	as required.
 */
#define DMFE_PCI_RNUMBER    2
#define DMFE_SLOP           (8*sizeof (uint32_t))
#define DMFE_BUF_SIZE       2048
#define DMFE_BUF_SIZE_1     2000
#define DMFE_DMA_MODE       DDI_DMA_STREAMING
#define DMFE_HEADROOM       34
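
/*
 * Illustrative compile-time check (not part of the original driver):
 * the comment above requires DMFE_HEADROOM to be 2 modulo 4, so that
 * after the 14-byte ethernet header is stripped the payload starts at
 * 34 + 14 = 48 bytes, a 4-byte-aligned offset.
 */
#if (DMFE_HEADROOM % 4) != 2
#error "DMFE_HEADROOM must be 2 modulo 4"
#endif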
static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
    TX_FILTER_TYPE0;    /* HASH_AND_PERFECT filtering (see above) */
/*
 * Some tunable parameters ...
 *	Number of RX/TX ring entries (128/128)
 *	Minimum number of TX ring slots to keep free (1)
 *	Low-water mark at which to try to reclaim TX ring slots (1)
 *	How often to take a TX-done interrupt (twice per ring cycle)
 *	Whether to reclaim TX ring entries on a TX-done interrupt (no)
 */
#define DMFE_TX_DESC    128    /* Should be a multiple of 4 <= 256 */
#define DMFE_RX_DESC    128    /* Should be a multiple of 4 <= 256 */

static uint32_t dmfe_rx_desc = DMFE_RX_DESC;
static uint32_t dmfe_tx_desc = DMFE_TX_DESC;
static uint32_t dmfe_tx_min_free = 1;
static uint32_t dmfe_tx_reclaim_level = 1;
static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;
static boolean_t dmfe_reclaim_on_done = B_FALSE;
/*
 * Time-related parameters:
 *
 * We use a cyclic to provide a periodic callback; this is then used
 * to check for TX-stall and poll the link status register.
 *
 * DMFE_TICK is the interval between cyclic callbacks, in microseconds.
 *
 * TX_STALL_TIME_100 is the timeout in microseconds between passing
 * a packet to the chip for transmission and seeing that it's gone,
 * when running at 100Mb/s.  If we haven't reclaimed at least one
 * descriptor in this time we assume the transmitter has stalled
 * and reset the chip.
 *
 * TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
 *
 * Patchable globals:
 *	dmfe_tick_us:		DMFE_TICK
 *	dmfe_tx100_stall_us:	TX_STALL_TIME_100
 *	dmfe_tx10_stall_us:	TX_STALL_TIME_10
 *
 * These are then used in _init() to calculate:
 *
 *	stall_100_tix[]: number of consecutive cyclic callbacks without a
 *			 reclaim before the TX process is considered stalled,
 *			 when running at 100Mb/s.  The elements are indexed
 *			 by transmit-engine-state.
 *	stall_10_tix[]:  number of consecutive cyclic callbacks without a
 *			 reclaim before the TX process is considered stalled,
 *			 when running at 10Mb/s.  The elements are indexed
 *			 by transmit-engine-state.
 */
#define DMFE_TICK           25000     /* microseconds */
#define TX_STALL_TIME_100   50000     /* microseconds */
#define TX_STALL_TIME_10    200000    /* microseconds */

static uint32_t dmfe_tick_us = DMFE_TICK;
static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;
/*
 * Calculated from above in _init()
 */
static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];
static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];
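
/*
 * Illustrative sketch only (the real calculation is in _init(), which
 * falls outside this extract): each stall limit is the stall timeout
 * divided by the tick interval, e.g.
 *
 *	stall_100_tix[state] = dmfe_tx100_stall_us / dmfe_tick_us;
 *				(= 50000/25000 = 2 ticks at 100Mb/s)
 *
 * possibly scaled per transmit-engine state as the comment above
 * describes; the exact per-state scaling is not shown here.
 */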
static char localmac_propname[] = "local-mac-address";
static char opmode_propname[] = "opmode-reg-value";
static int      dmfe_m_start(void *);
static void     dmfe_m_stop(void *);
static int      dmfe_m_promisc(void *, boolean_t);
static int      dmfe_m_multicst(void *, boolean_t, const uint8_t *);
static int      dmfe_m_unicst(void *, const uint8_t *);
static void     dmfe_m_ioctl(void *, queue_t *, mblk_t *);
static mblk_t   *dmfe_m_tx(void *, mblk_t *);
static int      dmfe_m_stat(void *, uint_t, uint64_t *);
static int      dmfe_m_getprop(void *, const char *, mac_prop_id_t,
                    uint_t, void *);
static int      dmfe_m_setprop(void *, const char *, mac_prop_id_t,
                    uint_t, const void *);
static void     dmfe_m_propinfo(void *, const char *, mac_prop_id_t,
                    mac_prop_info_handle_t);
static mac_callbacks_t dmfe_m_callbacks = {
    MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
    dmfe_m_stat,
    dmfe_m_start,
    dmfe_m_stop,
    dmfe_m_promisc,
    dmfe_m_multicst,
    dmfe_m_unicst,
    dmfe_m_tx,
    NULL,
    dmfe_m_ioctl,
    NULL,    /* mc_getcapab */
    NULL,    /* mc_open */
    NULL,    /* mc_close */
    dmfe_m_setprop,
    dmfe_m_getprop,
    dmfe_m_propinfo
};
/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
    DMA_ATTR_V0,             /* dma_attr version */
    0,                       /* dma_attr_addr_lo */
    (uint32_t)0xFFFFFFFF,    /* dma_attr_addr_hi */
    0x0FFFFFF,               /* dma_attr_count_max */
    0x20,                    /* dma_attr_align */
    0x7F,                    /* dma_attr_burstsizes */
    1,                       /* dma_attr_minxfer */
    (uint32_t)0xFFFFFFFF,    /* dma_attr_maxxfer */
    (uint32_t)0xFFFFFFFF,    /* dma_attr_seg */
    1,                       /* dma_attr_sgllen */
    1,                       /* dma_attr_granular */
    0                        /* dma_attr_flags */
};
/*
 * DMA access attributes for registers and descriptors
 */
static ddi_device_acc_attr_t dmfe_reg_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t dmfe_data_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC
};

static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
/*
 * ========== Lowest-level chip register & ring access routines ==========
 */

/*
 * I/O register get/put routines
 */
static uint32_t
dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
{
    uint32_t *addr;

    addr = (void *)(dmfep->io_reg + offset);
    return (ddi_get32(dmfep->io_handle, addr));
}

static void
dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
{
    uint32_t *addr;

    addr = (void *)(dmfep->io_reg + offset);
    ddi_put32(dmfep->io_handle, addr, value);
}
/*
 * TX/RX ring get/put routines
 */
static uint32_t
dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
{
    uint32_t *addr;

    addr = (void *)dma_p->mem_va;
    return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
}

static void
dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
{
    uint32_t *addr;

    addr = (void *)dma_p->mem_va;
    ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
}

/*
 * Setup buffer get/put routines
 */
static uint32_t
dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
{
    uint32_t *addr;

    addr = (void *)dma_p->setup_va;
    return (ddi_get32(dma_p->acc_hdl, addr + index));
}

static void
dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
{
    uint32_t *addr;

    addr = (void *)dma_p->setup_va;
    ddi_put32(dma_p->acc_hdl, addr + index, value);
}
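
/*
 * Usage note (illustrative, assuming DESC_SIZE counts the 32-bit words
 * per descriptor): because <addr> is a uint32_t pointer, a call such as
 *
 *	desc0 = dmfe_ring_get32(descp, 5, DESC0);
 *
 * reads the 32-bit word at byte offset (5*DESC_SIZE + DESC0) * 4 from
 * the start of the ring, i.e. word DESC0 of the sixth descriptor.
 */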
/*
 * ========== Low-level chip & ring buffer manipulation ==========
 */

/*
 * dmfe_set_opmode() -- function to set operating mode
 */
static void
dmfe_set_opmode(dmfe_t *dmfep)
{
    ASSERT(mutex_owned(dmfep->oplock));

    dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
}
/*
 * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
 */
static void
dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
{
    ASSERT(mutex_owned(dmfep->oplock));

    /*
     * Stop the chip:
     *	disable all interrupts
     *	stop TX/RX processes
     *	clear the status bits for TX/RX stopped
     * If required, reset the chip
     * Record the new state
     */
    dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
    dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
    dmfe_set_opmode(dmfep);
    dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);

    switch (newstate) {
    default:
        ASSERT(!"can't get here");
        return;

    case CHIP_STOPPED:
    case CHIP_ERROR:
        break;

    case CHIP_RESET:
        dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
        drv_usecwait(10);
        dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
        drv_usecwait(10);
        dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
        break;
    }

    dmfep->chip_state = newstate;
}
/*
 * Initialize transmit and receive descriptor rings, and
 * set the chip to point to the first entry in each ring
 */
static void
dmfe_init_rings(dmfe_t *dmfep)
{
    dma_area_t *descp;
    uint32_t pstart;
    uint32_t pnext;
    uint32_t pbuff;
    uint32_t desc1;
    int i;

    /*
     * You need all the locks in order to rewrite the descriptor rings
     */
    ASSERT(mutex_owned(dmfep->oplock));
    ASSERT(mutex_owned(dmfep->rxlock));
    ASSERT(mutex_owned(dmfep->txlock));

    /*
     * Program the RX ring entries
     */
    descp = &dmfep->rx_desc;
    pstart = descp->mem_dvma;
    pnext = pstart + sizeof (struct rx_desc_type);
    pbuff = dmfep->rx_buff.mem_dvma;
    desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;

    for (i = 0; i < dmfep->rx.n_desc; ++i) {
        dmfe_ring_put32(descp, i, RD_NEXT, pnext);
        dmfe_ring_put32(descp, i, BUFFER1, pbuff);
        dmfe_ring_put32(descp, i, DESC1, desc1);
        dmfe_ring_put32(descp, i, DESC0, RX_OWN);

        pnext += sizeof (struct rx_desc_type);
        pbuff += DMFE_BUF_SIZE;
    }

    /*
     * Fix up last entry & sync
     */
    dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
    DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
    dmfep->rx.next_free = 0;

    /*
     * Set the base address of the RX descriptor list in CSR3
     */
    dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);

    /*
     * Program the TX ring entries
     */
    descp = &dmfep->tx_desc;
    pstart = descp->mem_dvma;
    pnext = pstart + sizeof (struct tx_desc_type);
    pbuff = dmfep->tx_buff.mem_dvma;
    desc1 = TX_CHAINING;

    for (i = 0; i < dmfep->tx.n_desc; ++i) {
        dmfe_ring_put32(descp, i, TD_NEXT, pnext);
        dmfe_ring_put32(descp, i, BUFFER1, pbuff);
        dmfe_ring_put32(descp, i, DESC1, desc1);
        dmfe_ring_put32(descp, i, DESC0, 0);

        pnext += sizeof (struct tx_desc_type);
        pbuff += DMFE_BUF_SIZE;
    }

    /*
     * Fix up last entry & sync
     */
    dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
    DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
    dmfep->tx.n_free = dmfep->tx.n_desc;
    dmfep->tx.next_free = dmfep->tx.next_busy = 0;

    /*
     * Set the base address of the TX descriptor list in CSR4
     */
    dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
}
/*
 * dmfe_start_chip() -- start the chip transmitting and/or receiving
 */
static void
dmfe_start_chip(dmfe_t *dmfep, int mode)
{
    ASSERT(mutex_owned(dmfep->oplock));

    dmfep->opmode |= mode;
    dmfe_set_opmode(dmfep);

    dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
    /*
     * Enable VLAN length mode (allows packets to be 4 bytes longer).
     */
    dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);

    /*
     * Clear any pending process-stopped interrupts
     */
    dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
    dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
        mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
}
/*
 * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
 *
 * Normal interrupts:
 *	RX_PKTDONE_INT		(packet received)
 *	TX_PKTDONE_INT		(TX complete)
 * and, sometimes:
 *	TX_ALLDONE_INT		(next TX buffer not ready)
 *
 * Abnormal interrupts:
 *	RX_STOPPED_INT, TX_STOPPED_INT, RX_UNAVAIL_INT and
 *	SYSTEM_ERR_INT (the set actually masked in below), plus:
 *	GP_TIMER_INT		(not valid in -9 chips)
 *	LINK_STATUS_INT		(not valid in -9 chips)
 */
static void
dmfe_enable_interrupts(dmfe_t *dmfep)
{
    ASSERT(mutex_owned(dmfep->oplock));

    /*
     * Put 'the standard set of interrupts' in the interrupt mask register
     */
    dmfep->imask = RX_PKTDONE_INT | TX_PKTDONE_INT |
        RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;

    dmfe_chip_put32(dmfep, INT_MASK_REG,
        NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
    dmfep->chip_state = CHIP_RUNNING;
}
/*
 * ========== RX side routines ==========
 */

/*
 * Function to update receive statistics on various errors
 */
static void
dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
{
    ASSERT(mutex_owned(dmfep->rxlock));

    /*
     * The error summary bit and the error bits that it summarises
     * are only valid if this is the last fragment.  Therefore, a
     * fragment only contributes to the error statistics if both
     * the last-fragment and error summary bits are set.
     */
    if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
        dmfep->rx_stats_ierrors += 1;

        /*
         * There are some other error bits in the descriptor for
         * which there don't seem to be appropriate MAC statistics,
         * notably RX_COLLISION and perhaps RX_DESC_ERR.  The
         * latter may not be possible if it is supposed to indicate
         * that one buffer has been filled with a partial packet
         * and the next buffer required for the rest of the packet
         * was not available, as all our buffers are more than large
         * enough for a whole packet without fragmenting.
         */
        if (desc0 & RX_OVERFLOW) {
            dmfep->rx_stats_overflow += 1;

        } else if (desc0 & RX_RUNT_FRAME)
            dmfep->rx_stats_short += 1;

        if (desc0 & RX_CRC)    /* CRC/FCS-error bit; name per dmfe.h assumed */
            dmfep->rx_stats_fcs += 1;

        if (desc0 & RX_FRAME2LONG)
            dmfep->rx_stats_toolong += 1;
    }

    /*
     * A receive watchdog timeout is counted as a MAC-level receive
     * error.  Strangely, it doesn't set the packet error summary bit,
     * according to the chip data sheet :-?
     */
    if (desc0 & RX_RCV_WD_TO)
        dmfep->rx_stats_macrcv_errors += 1;

    if (desc0 & RX_DRIBBLING)
        dmfep->rx_stats_align += 1;

    if (desc0 & RX_MII_ERR)
        dmfep->rx_stats_macrcv_errors += 1;
}
/*
 * Receive incoming packet(s) and pass them up ...
 */
static mblk_t *
dmfe_getp(dmfe_t *dmfep)
{
    dma_area_t *descp;
    mblk_t **tail;
    mblk_t *head;
    mblk_t *mp;
    char *rxb;
    uchar_t *dp;
    uint32_t desc0;
    uint32_t misses;
    int packet_length;
    int index;

    mutex_enter(dmfep->rxlock);

    /*
     * Update the missed frame statistic from the on-chip counter.
     */
    misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
    dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);

    /*
     * sync (all) receive descriptors before inspecting them
     */
    descp = &dmfep->rx_desc;
    DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

    /*
     * We should own at least one RX entry, since we've had a
     * receive interrupt, but let's not be dogmatic about it.
     */
    index = dmfep->rx.next_free;
    desc0 = dmfe_ring_get32(descp, index, DESC0);

    DTRACE_PROBE1(rx__start, uint32_t, desc0);
    for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
        /*
         * Maintain statistics for every descriptor returned
         * to us by the chip ...
         */
        dmfe_update_rx_stats(dmfep, desc0);

        /*
         * Check that the entry has both "packet start" and
         * "packet end" flags.  We really shouldn't get packet
         * fragments, 'cos all the RX buffers are bigger than
         * the largest valid packet.  So we'll just drop any
         * fragments we find & skip on to the next entry.
         */
        if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
            DTRACE_PROBE1(rx__frag, uint32_t, desc0);
            goto skip;
        }

        /*
         * A whole packet in one buffer.  We have to check error
         * status and packet length before forwarding it upstream.
         */
        if (desc0 & RX_ERR_SUMMARY) {
            DTRACE_PROBE1(rx__err, uint32_t, desc0);
            goto skip;
        }

        packet_length = (desc0 >> 16) & 0x3fff;
        if (packet_length > DMFE_MAX_PKT_SIZE) {
            DTRACE_PROBE1(rx__toobig, int, packet_length);
            goto skip;
        } else if (packet_length < ETHERMIN) {
            /*
             * Note that a VLAN packet would be even larger,
             * but we don't worry about dropping runt VLAN
             * frames.
             *
             * This check is probably redundant, as well,
             * since the hardware should drop RUNT frames.
             */
            DTRACE_PROBE1(rx__runt, int, packet_length);
            goto skip;
        }

        /*
         * Sync the data, so we can examine it; then check that
         * the packet is really intended for us (remember that
         * if we're using Imperfect Filtering, then the chip will
         * receive unicast packets sent to stations whose addresses
         * just happen to hash to the same value as our own; we
         * discard these here so they don't get sent upstream ...)
         */
        (void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
            index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
            DDI_DMA_SYNC_FORKERNEL);
        rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];

        /*
         * We do not bother to check that the packet is really for
         * us; we let the MAC framework make that check instead.
         * This is especially important if we ever want to support
         * multiple MAC addresses.
         */

        /*
         * Packet looks good; get a buffer to copy it into.  We
         * allow some space at the front of the allocated buffer
         * (HEADROOM) in case any upstream modules want to prepend
         * some sort of header.  The value has been carefully chosen
         * so that it also has the side-effect of making the packet
         * *contents* 4-byte aligned, as required by NCA!
         */
        mp = allocb(DMFE_HEADROOM + packet_length, 0);
        if (mp == NULL) {
            DTRACE_PROBE(rx__no__buf);
            dmfep->rx_stats_norcvbuf += 1;
            goto skip;
        }

        /*
         * Account for statistics of good packets.
         */
        dmfep->rx_stats_ipackets += 1;
        dmfep->rx_stats_rbytes += packet_length;
        if (desc0 & RX_MULTI_FRAME) {
            if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
                dmfep->rx_stats_multi += 1;
            } else {
                dmfep->rx_stats_bcast += 1;
            }
        }

        /*
         * Copy the packet into the STREAMS buffer
         */
        dp = mp->b_rptr += DMFE_HEADROOM;
        mp->b_cont = mp->b_next = NULL;

        /*
         * Don't worry about stripping the vlan tag, the MAC
         * layer will take care of that for us.
         */
        bcopy(rxb, dp, packet_length);

        /*
         * Fix up the packet length, and link it to the chain
         */
        mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
        *tail = mp;
        tail = &mp->b_next;

    skip:
        /*
         * Return ownership of ring entry & advance to next
         */
        dmfe_ring_put32(descp, index, DESC0, RX_OWN);
        index = NEXT(index, dmfep->rx.n_desc);
        desc0 = dmfe_ring_get32(descp, index, DESC0);
    }

    /*
     * Remember where to start looking next time ...
     */
    dmfep->rx.next_free = index;

    /*
     * sync the receive descriptors that we've given back
     * (actually, we sync all of them for simplicity), and
     * wake the chip in case it had suspended receive
     */
    DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
    dmfe_chip_put32(dmfep, RX_POLL_REG, 0);

    mutex_exit(dmfep->rxlock);
    return (head);
}
/*
 * ========== Primary TX side routines ==========
 */

/*
 *	TX ring management:
 *
 *	There are <tx.n_desc> entries in the ring, of which those from
 *	<tx.next_free> round to but not including <tx.next_busy> must
 *	be owned by the CPU.  The number of such entries should equal
 *	<tx.n_free>; but there may also be some more entries which the
 *	chip has given back but which we haven't yet accounted for.
 *	The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
 *	as it discovers such entries.
 *
 *	Initially, or when the ring is entirely free:
 *		C = Owned by CPU
 *		D = Owned by Davicom (DMFE) chip
 *
 *	tx.next_free					tx.n_desc = 16
 *	  |
 *	  v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	  ^
 *	  |
 *	tx.next_busy					tx.n_free = 16
 *
 *	On entry to reclaim() during normal use:
 *
 *	tx.next_free					tx.n_desc = 16
 *	                                      |
 *	                                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	          ^
 *	          |
 *	tx.next_busy					tx.n_free = 9
 *
 *	On exit from reclaim():
 *
 *	tx.next_free					tx.n_desc = 16
 *	                                      |
 *	                                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	                          ^
 *	                          |
 *	tx.next_busy					tx.n_free = 13
 *
 *	The ring is considered "full" when only one entry is owned by
 *	the CPU; thus <tx.n_free> should always be >= 1.
 *
 *	tx.next_free					tx.n_desc = 16
 *	                      |
 *	                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	                          ^
 *	                          |
 *	tx.next_busy					tx.n_free = 1
 */
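
/*
 * A quick arithmetic check of the diagrams above (illustrative only):
 * counting accounted CPU-owned entries as
 * (tx.next_busy - tx.next_free) mod tx.n_desc gives
 * (2 - 9) mod 16 = 9 on entry to reclaim(), (6 - 9) mod 16 = 13 on
 * exit, and (6 - 5) mod 16 = 1 for the "full" ring, matching the
 * tx.n_free values shown.  The entirely-free ring, where
 * next_free == next_busy, is the special case n_free == n_desc.
 */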
/*
 * Function to update transmit statistics on various errors
 */
static void
dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
{
    uint32_t collisions;
    uint32_t errbits;
    uint32_t errsum;

    ASSERT(mutex_owned(dmfep->txlock));

    collisions = ((desc0 >> 3) & 0x0f);
    errsum = desc0 & TX_ERR_SUMMARY;
    errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
        TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
    if ((errsum == 0) != (errbits == 0)) {
        dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
        desc0 |= TX_ERR_SUMMARY;
    }

    if (desc0 & TX_ERR_SUMMARY) {
        dmfep->tx_stats_oerrors += 1;

        /*
         * If we ever see a transmit jabber timeout, we count it
         * as a MAC-level transmit error; but we probably won't
         * see it as it causes an Abnormal interrupt and we reset
         * the chip in order to recover
         */
        if (desc0 & TX_JABBER_TO) {
            dmfep->tx_stats_macxmt_errors += 1;
            dmfep->tx_stats_jabber += 1;
        }

        if (desc0 & TX_UNDERFLOW)
            dmfep->tx_stats_underflow += 1;
        else if (desc0 & TX_LATE_COLL)
            dmfep->tx_stats_xmtlatecoll += 1;

        if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER))
            dmfep->tx_stats_nocarrier += 1;

        if (desc0 & TX_EXCESS_COLL) {
            dmfep->tx_stats_excoll += 1;
            collisions = 16;
        }
    } else {
        int bit = 1 << (index % NBBY);    /* mask, matching dmfe_send_msg() */
        int byt = index / NBBY;

        if (dmfep->tx_mcast[byt] & bit) {
            dmfep->tx_mcast[byt] &= ~bit;
            dmfep->tx_stats_multi += 1;

        } else if (dmfep->tx_bcast[byt] & bit) {
            dmfep->tx_bcast[byt] &= ~bit;
            dmfep->tx_stats_bcast += 1;
        }

        dmfep->tx_stats_opackets += 1;
        dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1;
    }

    if (collisions == 1)
        dmfep->tx_stats_first_coll += 1;
    else if (collisions != 0)
        dmfep->tx_stats_multi_coll += 1;
    dmfep->tx_stats_collisions += collisions;

    if (desc0 & TX_DEFERRED)
        dmfep->tx_stats_defer += 1;
}
/*
 * Reclaim all the ring entries that the chip has returned to us ...
 *
 * Returns B_FALSE if no entries could be reclaimed.  Otherwise, reclaims
 * as many as possible, restarts the TX stall timeout, and returns B_TRUE.
 */
static boolean_t
dmfe_reclaim_tx_desc(dmfe_t *dmfep)
{
    dma_area_t *descp;
    uint32_t desc0;
    uint32_t desc1;
    int i;

    ASSERT(mutex_owned(dmfep->txlock));

    /*
     * sync transmit descriptor ring before looking at it
     */
    descp = &dmfep->tx_desc;
    DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

    /*
     * Early exit if there are no descriptors to reclaim, either
     * because they're all reclaimed already, or because the next
     * one is still owned by the chip ...
     */
    i = dmfep->tx.next_busy;
    if (i == dmfep->tx.next_free)
        return (B_FALSE);
    desc0 = dmfe_ring_get32(descp, i, DESC0);
    if (desc0 & TX_OWN)
        return (B_FALSE);

    /*
     * Reclaim as many descriptors as possible ...
     */
    for (;;) {
        desc1 = dmfe_ring_get32(descp, i, DESC1);
        ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);

        if ((desc1 & TX_SETUP_PACKET) == 0) {
            /*
             * Regular packet - just update stats
             */
            dmfe_update_tx_stats(dmfep, i, desc0, desc1);
        }

        /*
         * Update count & index; we're all done if the ring is
         * now fully reclaimed, or the next entry is still owned
         * by the chip ...
         */
        dmfep->tx.n_free += 1;
        i = NEXT(i, dmfep->tx.n_desc);
        if (i == dmfep->tx.next_free)
            break;
        desc0 = dmfe_ring_get32(descp, i, DESC0);
        if (desc0 & TX_OWN)
            break;
    }

    dmfep->tx.next_busy = i;
    dmfep->tx_pending_tix = 0;
    return (B_TRUE);
}
/*
 * Send the message in the message block chain <mp>.
 *
 * The message is freed if and only if its contents are successfully copied
 * and queued for transmission (so that the return value is B_TRUE).
 * If we can't queue the message, the return value is B_FALSE and
 * the message is *not* freed.
 *
 * This routine handles the special case of <mp> == NULL, which indicates
 * that we want to "send" the special "setup packet" allocated during
 * startup.  We have to use some different flags in the packet descriptor
 * to say it's a setup packet (from the global <dmfe_setup_desc1>), and the
 * setup packet *isn't* freed after use.
 */
static boolean_t
dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
{
    dma_area_t *descp;
    mblk_t *bp;
    char *txb;
    uint32_t desc1;
    uint32_t index;
    uint32_t paddr;
    size_t totlen;
    size_t mblen;

    /*
     * If the number of free slots is below the reclaim threshold
     * (soft limit), we'll try to reclaim some.  If we fail, and
     * the number of free slots is also below the minimum required
     * (the hard limit, usually 1), then we can't send the packet.
     */
    mutex_enter(dmfep->txlock);
    if (dmfep->suspended) {
        mutex_exit(dmfep->txlock);
        return (B_FALSE);
    }

    if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
        dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
        dmfep->tx.n_free <= dmfe_tx_min_free) {
        /*
         * Resource shortage - return B_FALSE so the packet
         * will be queued for retry after the next TX-done
         * interrupt.
         */
        mutex_exit(dmfep->txlock);
        DTRACE_PROBE(tx__no__desc);
        return (B_FALSE);
    }

    /*
     * There's a slot available, so claim it by incrementing
     * the next-free index and decrementing the free count.
     * If the ring is currently empty, we also restart the
     * stall-detect timer.  The ASSERTions check that our
     * invariants still hold:
     *	the next-free index must not match the next-busy index
     *	there must still be at least one free entry
     * After this, we now have exclusive ownership of the ring
     * entry (and matching buffer) indicated by <index>, so we
     * don't need to hold the TX lock any longer
     */
    index = dmfep->tx.next_free;
    dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
    ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
    if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
        dmfep->tx_pending_tix = 0;
    ASSERT(dmfep->tx.n_free >= 1);
    mutex_exit(dmfep->txlock);

    /*
     * Check the ownership of the ring entry ...
     */
    descp = &dmfep->tx_desc;
    ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);

    if (mp == NULL) {
        /*
         * Indicates we should send a SETUP packet, which we do by
         * temporarily switching the BUFFER1 pointer in the ring
         * entry.  The reclaim routine will restore BUFFER1 to its
         * usual value.
         *
         * Note that as the setup packet is tagged on the end of
         * the TX ring, when we sync the descriptor we're also
         * implicitly syncing the setup packet - hence, we don't
         * need a separate ddi_dma_sync() call here.
         */
        desc1 = dmfe_setup_desc1;
        paddr = descp->setup_dvma;
    } else {
        /*
         * A regular packet; we copy the data into a pre-mapped
         * buffer, which avoids the overhead (and complication)
         * of mapping/unmapping STREAMS buffers and keeping hold
         * of them until the DMA has completed.
         *
         * Because all buffers are the same size, and larger
         * than the longest single valid message, we don't have
         * to bother about splitting the message across multiple
         * buffers.
         */
        txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
        totlen = 0;
        bp = mp;

        /*
         * Copy all (remaining) mblks in the message ...
         */
        for (; bp != NULL; bp = bp->b_cont) {
            mblen = MBLKL(bp);
            if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
                bcopy(bp->b_rptr, txb, mblen);
                txb += mblen;
            }
        }

        /*
         * Is this a multicast or broadcast packet?  We do
         * this so that we can track statistics accurately
         * when we reclaim it.
         */
        txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
        if (txb[0] & 0x1) {
            if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
                dmfep->tx_bcast[index / NBBY] |=
                    (1 << (index % NBBY));
            } else {
                dmfep->tx_mcast[index / NBBY] |=
                    (1 << (index % NBBY));
            }
        }

        /*
         * We've reached the end of the chain; and we should have
         * collected no more than DMFE_MAX_PKT_SIZE bytes into our
         * buffer.  Note that the <size> field in the descriptor is
         * only 11 bits, so bigger packets would be a problem!
         */
        ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
        totlen &= TX_BUFFER_SIZE1;
        desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;
        paddr = dmfep->tx_buff.mem_dvma + index*DMFE_BUF_SIZE;

        (void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
            index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
    }

    /*
     * Update ring descriptor entries, sync them, and wake up the
     * transmit process
     */
    if ((index & dmfe_tx_int_factor) == 0)
        desc1 |= TX_INT_ON_COMP;
    desc1 |= TX_CHAINING;
    dmfe_ring_put32(descp, index, BUFFER1, paddr);
    dmfe_ring_put32(descp, index, DESC1, desc1);
    dmfe_ring_put32(descp, index, DESC0, TX_OWN);
    DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
    dmfe_chip_put32(dmfep, TX_POLL_REG, 0);

    /*
     * Finally, free the message & return success
     */
    if (mp)
        freemsg(mp);
    return (B_TRUE);
}
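
/*
 * Ordering note (illustrative commentary on the code above): the
 * descriptor words are written with BUFFER1 and DESC1 first and the
 * TX_OWN bit in DESC0 last, then synced FORDEV, so the chip can never
 * observe a half-built descriptor; the subsequent write to TX_POLL_REG
 * merely prods the transmit engine to look at the ring.
 */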
/*
 * dmfe_m_tx() -- send a chain of packets
 *
 * Called when packet(s) are ready to be transmitted.  A pointer to an
 * M_DATA message that contains the packet is passed to this routine.
 * The complete LLC header is contained in the message's first message
 * block, and the remainder of the packet is contained within
 * additional M_DATA message blocks linked to the first message block.
 *
 * Additional messages may be passed by linking with b_next.
 */
static mblk_t *
dmfe_m_tx(void *arg, mblk_t *mp)
{
    dmfe_t *dmfep = arg;    /* private device info */
    mblk_t *next;

    ASSERT(dmfep->mac_state == DMFE_MAC_STARTED);

    if (dmfep->chip_state != CHIP_RUNNING)
        return (mp);

    while (mp != NULL) {
        next = mp->b_next;
        mp->b_next = NULL;
        if (!dmfe_send_msg(dmfep, mp)) {
            mp->b_next = next;
            break;
        }
        mp = next;
    }

    return (mp);
}
/*
 * ========== Address-setting routines (TX-side) ==========
 */

/*
 * Find the index of the relevant bit in the setup packet.
 * This must mirror the way the hardware will actually calculate it!
 */
static uint32_t
dmfe_hash_index(const uint8_t *address)
{
    uint32_t const POLY = HASH_POLY;
    uint32_t crc = HASH_CRC;
    uint32_t index;
    uint32_t msb;
    uchar_t currentbyte;
    int byteslength;
    int shift;
    int bit;

    for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) {
        currentbyte = address[byteslength];
        for (bit = 0; bit < 8; ++bit) {
            msb = crc >> 31;
            crc <<= 1;
            if (msb ^ (currentbyte & 1)) {
                crc ^= POLY;
                crc |= 0x00000001;
            }
            currentbyte >>= 1;
        }
    }

    for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift)
        index |= (((crc >> bit) & 1) << shift);

    return (index);
}
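
/*
 * Worked example (illustrative): the double loop above is a bit-serial
 * CRC over the 48 address bits, least-significant bit of each byte
 * first; the final loop then picks CRC bits 23..31 out in reverse
 * order to form a 9-bit index in the range 0..511, i.e. one of the
 * 512 bits of the setup-packet hash table.
 */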
/*
 * Find and set/clear the relevant bit in the setup packet hash table.
 * This must mirror the way the hardware will actually interpret it!
 */
static void
dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
{
    dma_area_t *descp;
    uint32_t tmp;

    ASSERT(mutex_owned(dmfep->oplock));

    descp = &dmfep->tx_desc;
    tmp = dmfe_setup_get32(descp, index/16);
    if (val)
        tmp |= 1 << (index%16);
    else
        tmp &= ~(1 << (index%16));
    dmfe_setup_put32(descp, index/16, tmp);
}
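
/*
 * For example (illustrative): index 37 selects 16-bit chunk 37/16 = 2
 * of the setup buffer and bit 37%16 = 5 within it, so enabling it sets
 * bit 5 of the third 32-bit setup word (apparently only the low 16
 * bits of each setup word are significant to the hardware).
 */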
/*
 * Update the refcount for the bit in the setup packet corresponding
 * to the specified address; if it changes between zero & nonzero,
 * also update the bitmap itself & return B_TRUE, so that the caller
 * knows to re-send the setup packet.  Otherwise (only the refcount
 * changed), return B_FALSE
 */
static boolean_t
dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
{
    uint32_t index;
    uint8_t *refp;
    boolean_t change;

    index = dmfe_hash_index(mca);
    refp = &dmfep->mcast_refs[index];
    change = (val ? (*refp)++ : --(*refp)) == 0;

    if (change)
        dmfe_update_hash(dmfep, index, val);

    return (change);
}
1280 * "Transmit" the (possibly updated) magic setup packet
1283 dmfe_send_setup(dmfe_t
*dmfep
)
1287 ASSERT(mutex_owned(dmfep
->oplock
));
1289 if (dmfep
->suspended
)
1293 * If the chip isn't running, we can't really send the setup frame
1294 * now but it doesn't matter, 'cos it will be sent when the transmit
1295 * process is restarted (see dmfe_start()).
1297 if ((dmfep
->opmode
& START_TRANSMIT
) == 0)
1301 * "Send" the setup frame. If it fails (e.g. no resources),
1302 * set a flag; then the factotum will retry the "send". Once
1303 * it works, we can clear the flag no matter how many attempts
1304 * had previously failed. We tell the caller that it worked
1305 * whether it did or not; after all, it *will* work eventually.
1307 status
= dmfe_send_msg(dmfep
, NULL
);
1308 dmfep
->need_setup
= status
? B_FALSE
: B_TRUE
;
/*
 * dmfe_m_unicst() -- set the physical network address
 */
static int
dmfe_m_unicst(void *arg, const uint8_t *macaddr)
{
    dmfe_t *dmfep = arg;
    int status;
    int index;

    /*
     * Update our current address and send out a new setup packet
     *
     * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
     * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
     *
     * It is said that there is a bug in the 21140 where it fails to
     * receive packets addressed to the specified perfect filter address.
     * If the same bug is present in the DM9102A, the TX_FILTER_TYPE1
     * bit should be set in the module variable dmfe_setup_desc1.
     *
     * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
     * In this mode, *all* incoming addresses are hashed and looked
     * up in the bitmap described by the setup packet.  Therefore,
     * the bit representing the station address has to be added to
     * the table before sending it out.  If the address is changed,
     * the old entry should be removed before the new entry is made.
     *
     * NOTE: in this mode, unicast packets that are not intended for
     * this station may be received; it is up to software to filter
     * them out afterwards!
     *
     * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
     * filtering.  In this mode, multicast addresses are hashed and
     * checked against the bitmap, while unicast addresses are simply
     * matched against the one physical address specified in the setup
     * packet.  This means that we shouldn't receive unicast packets
     * that aren't intended for us (but software still has to filter
     * multicast packets just the same).
     *
     * Whichever mode we're using, we have to enter the broadcast
     * address into the multicast filter map too, so we do this on
     * the first time through after attach or reset.
     */
    mutex_enter(dmfep->oplock);

    if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
        (void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
    if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
        (void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
    if (!dmfep->addr_set)
        (void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);

    /*
     * Remember the new current address
     */
    ethaddr_copy(macaddr, dmfep->curr_addr);
    dmfep->addr_set = B_TRUE;

    /*
     * Install the new physical address into the proper position in
     * the setup frame; this is only used if we select hash+perfect
     * filtering, but we'll put it in anyway.  The ugliness here is
     * down to the usual war of the egg :(
     */
    for (index = 0; index < ETHERADDRL; index += 2)
        dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS + index/2,
            (macaddr[index+1] << 8) | macaddr[index]);

    /*
     * Finally, we're ready to "transmit" the setup frame
     */
    status = dmfe_send_setup(dmfep);
    mutex_exit(dmfep->oplock);

    return (status);
}
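
/*
 * For example (illustrative): a station address 00:03:ba:12:34:56 is
 * packed two bytes per setup word, low byte first, by the loop above,
 * giving the values 0x0300, 0x12ba and 0x5634 at offsets SETUPBUF_PHYS,
 * SETUPBUF_PHYS+1 and SETUPBUF_PHYS+2 respectively.
 */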
/*
 * dmfe_m_multicst() -- enable or disable a multicast address
 *
 * Program the hardware to enable/disable the multicast address
 * in "mca" (enable if add is true, otherwise disable it).
 * We keep a refcount for each bit in the map, so that it still
 * works out properly if multiple addresses hash to the same bit.
 * dmfe_update_mcast() tells us whether the map actually changed;
 * if so, we have to re-"transmit" the magic setup packet.
 */
static int
dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
    dmfe_t *dmfep = arg;    /* private device info */
    int status = 0;

    mutex_enter(dmfep->oplock);
    if (dmfe_update_mcast(dmfep, mca, add))
        status = dmfe_send_setup(dmfep);
    mutex_exit(dmfep->oplock);

    return (status);
}
/*
 * ========== Internal state management entry points ==========
 */

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the MAC layer
 * state, so they can be called internally without disturbing our record
 * of what the MAC layer thinks we should be doing ...
 */

/*
 *	dmfe_stop() -- stop processing, don't reset h/w or rings
 */
static void
dmfe_stop(dmfe_t *dmfep)
{
    ASSERT(mutex_owned(dmfep->oplock));

    dmfe_stop_chip(dmfep, CHIP_STOPPED);
}
/*
 *	dmfe_reset() -- stop processing, reset h/w & rings to initial state
 */
static void
dmfe_reset(dmfe_t *dmfep)
{
    ASSERT(mutex_owned(dmfep->oplock));
    ASSERT(mutex_owned(dmfep->rxlock));
    ASSERT(mutex_owned(dmfep->txlock));

    dmfe_stop_chip(dmfep, CHIP_RESET);
    dmfe_init_rings(dmfep);
}
/*
 *	dmfe_start() -- start transmitting/receiving
 */
static void
dmfe_start(dmfe_t *dmfep)
{
    uint32_t gpsr;

    ASSERT(mutex_owned(dmfep->oplock));

    ASSERT(dmfep->chip_state == CHIP_RESET ||
        dmfep->chip_state == CHIP_STOPPED);

    /*
     * Make opmode consistent with PHY duplex setting
     */
    gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
    if (gpsr & GPS_FULL_DUPLEX)
        dmfep->opmode |= FULL_DUPLEX;
    else
        dmfep->opmode &= ~FULL_DUPLEX;

    /*
     * Start transmit processing
     * Set up the address filters
     * Start receive processing
     * Enable interrupts
     */
    dmfe_start_chip(dmfep, START_TRANSMIT);
    (void) dmfe_send_setup(dmfep);
    drv_usecwait(10);
    dmfe_start_chip(dmfep, START_RECEIVE);
    dmfe_enable_interrupts(dmfep);
}
/*
 * dmfe_restart - restart transmitting/receiving after error or suspend
 */
static void
dmfe_restart(dmfe_t *dmfep)
{
    ASSERT(mutex_owned(dmfep->oplock));

    /*
     * You need not only <oplock>, but also <rxlock> AND <txlock>
     * in order to reset the rings, but then <txlock> *mustn't*
     * be held across the call to dmfe_start()
     */
    mutex_enter(dmfep->rxlock);
    mutex_enter(dmfep->txlock);
    dmfe_reset(dmfep);
    mutex_exit(dmfep->txlock);
    mutex_exit(dmfep->rxlock);
    if (dmfep->mac_state == DMFE_MAC_STARTED) {
        dmfe_start(dmfep);
    }
}
/*
 * ========== MAC-required management entry points ==========
 */

/*
 *	dmfe_m_stop() -- stop transmitting/receiving
 */
static void
dmfe_m_stop(void *arg)
{
    dmfe_t *dmfep = arg;    /* private device info */

    /*
     * Just stop processing, then record new MAC state
     */
    mii_stop(dmfep->mii);

    mutex_enter(dmfep->oplock);
    if (!dmfep->suspended)
        dmfe_stop(dmfep);
    dmfep->mac_state = DMFE_MAC_STOPPED;
    mutex_exit(dmfep->oplock);
}
/*
 *	dmfe_m_start() -- start transmitting/receiving
 */
static int
dmfe_m_start(void *arg)
{
    dmfe_t *dmfep = arg;    /* private device info */

    /*
     * Start processing and record new MAC state
     */
    mutex_enter(dmfep->oplock);
    if (!dmfep->suspended)
        dmfe_start(dmfep);
    dmfep->mac_state = DMFE_MAC_STARTED;
    mutex_exit(dmfep->oplock);

    mii_start(dmfep->mii);

    return (0);
}
/*
 * dmfe_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.  Davicom don't document this
 *	clearly, but it looks like we can do this on-the-fly (i.e.
 *	without stopping & restarting the TX/RX processes).
 */
static int
dmfe_m_promisc(void *arg, boolean_t on)
{
    dmfe_t *dmfep = arg;

    mutex_enter(dmfep->oplock);
    dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
    if (on)
        dmfep->opmode |= PROMISC_MODE;
    if (!dmfep->suspended)
        dmfe_set_opmode(dmfep);
    mutex_exit(dmfep->oplock);

    return (0);
}
/*
 * ========== Factotum, implemented as a softint handler ==========
 */

/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a (high-level?) hardware interrupt handler.  Its
 * two main tasks are:
 *	reset & restart the chip after an error
 *	update & restart the chip after a link status change
 */
static uint_t
dmfe_factotum(caddr_t arg)
{
    dmfe_t *dmfep;

    dmfep = (void *)arg;
    ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

    mutex_enter(dmfep->oplock);
    if (dmfep->suspended) {
        mutex_exit(dmfep->oplock);
        return (DDI_INTR_CLAIMED);
    }

    dmfep->factotum_flag = 0;
    DRV_KS_INC(dmfep, KS_FACTOTUM_RUN);

    /*
     * Check for chip error ...
     */
    if (dmfep->chip_state == CHIP_ERROR) {
        /*
         * Error recovery required: reset the chip and the rings,
         * then, if it's supposed to be running, kick it off again.
         */
        DRV_KS_INC(dmfep, KS_RECOVERY);
        dmfe_restart(dmfep);
        mutex_exit(dmfep->oplock);

        mii_reset(dmfep->mii);

    } else if (dmfep->need_setup) {
        (void) dmfe_send_setup(dmfep);
        mutex_exit(dmfep->oplock);
    } else {
        mutex_exit(dmfep->oplock);
    }

    return (DDI_INTR_CLAIMED);
}
static void
dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
{
    _NOTE(ARGUNUSED(why));
    ASSERT(mutex_owned(dmfep->oplock));
    DRV_KS_INC(dmfep, ks_id);

    if (dmfep->factotum_flag++ == 0)
        ddi_trigger_softintr(dmfep->factotum_id);
}
/*
 * ========== Periodic Tasks (Cyclic handler & friends) ==========
 */

/*
 * Periodic tick tasks, run from the cyclic handler
 *
 * Check for TX stall; flag an error and wake the factotum if so.
 */
static void
dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
{
    boolean_t tx_stall;
    uint32_t tx_state;
    uint32_t limit;

    ASSERT(mutex_owned(dmfep->oplock));

    /*
     * Check for transmit stall ...
     *
     * IF there's at least one packet in the ring, AND the timeout
     * has elapsed, AND we can't reclaim any descriptors, THEN we've
     * stalled; we return B_TRUE to trigger a reset-and-recover cycle.
     *
     * Note that the timeout limit is based on the transmit engine
     * state; we allow the transmitter longer to make progress in
     * some states than in others, based on observations of this
     * chip's actual behaviour in the lab.
     *
     * By observation, we find that on about 1 in 10000 passes through
     * here, the TX lock is already held.  In that case, we'll skip
     * the check on this pass rather than wait.  Most likely, the send
     * routine was holding the lock when the interrupt happened, and
     * we'll succeed next time through.  In the event of a real stall,
     * the TX ring will fill up, after which the send routine won't be
     * called any more and then we're sure to get in.
     */
    tx_stall = B_FALSE;
    if (mutex_tryenter(dmfep->txlock)) {
        if (dmfep->tx.n_free < dmfep->tx.n_desc) {
            tx_state = TX_PROCESS_STATE(istat);
            if (gpsr & GPS_LINK_100)
                limit = stall_100_tix[tx_state];
            else
                limit = stall_10_tix[tx_state];
            if (++dmfep->tx_pending_tix >= limit &&
                dmfe_reclaim_tx_desc(dmfep) == B_FALSE) {
                dmfe_log(dmfep, "TX stall detected "
                    "after %d ticks in state %d; "
                    "automatic recovery initiated",
                    dmfep->tx_pending_tix, tx_state);
                tx_stall = B_TRUE;
            }
        }
        mutex_exit(dmfep->txlock);
    }

    if (tx_stall) {
        dmfe_stop_chip(dmfep, CHIP_ERROR);
        dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)");
    }
}
/*
 * Cyclic callback handler
 */
static void
dmfe_cyclic(void *arg)
{
    dmfe_t *dmfep = arg;    /* private device info */
    uint32_t istat;
    uint32_t gpsr;

    /*
     * If the chip's not RUNNING, there's nothing to do.
     * If we can't get the mutex straight away, we'll just
     * skip this pass; we'll be back soon enough anyway.
     */
    if (mutex_tryenter(dmfep->oplock) == 0)
        return;
    if ((dmfep->suspended) || (dmfep->chip_state != CHIP_RUNNING)) {
        mutex_exit(dmfep->oplock);
        return;
    }

    /*
     * Recheck chip state (it might have been stopped since we
     * checked above).  If still running, call each of the *tick*
     * tasks.  They will check for link change, TX stall, etc ...
     */
    if (dmfep->chip_state == CHIP_RUNNING) {
        istat = dmfe_chip_get32(dmfep, STATUS_REG);
        gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
        dmfe_tick_stall_check(dmfep, gpsr, istat);
    }

    DRV_KS_INC(dmfep, KS_CYCLIC_RUN);
    mutex_exit(dmfep->oplock);
}
/*
 * ========== Hardware interrupt handler ==========
 */

/*
 *	dmfe_interrupt() -- handle chip interrupts
 */
static uint_t
dmfe_interrupt(caddr_t arg)
{
    dmfe_t *dmfep;    /* private device info */
    uint32_t interrupts;
    uint32_t istat;
    const char *msg;
    mblk_t *mp;
    boolean_t warning_msg = B_TRUE;

    dmfep = (void *)arg;

    mutex_enter(dmfep->oplock);
    if (dmfep->suspended) {
        mutex_exit(dmfep->oplock);
        return (DDI_INTR_UNCLAIMED);
    }

    /*
     * A quick check as to whether the interrupt was from this
     * device, before we even finish setting up all our local
     * variables.  Note that reading the interrupt status register
     * doesn't have any unpleasant side effects such as clearing
     * the bits read, so it's quite OK to re-read it once we have
     * determined that we are going to service this interrupt and
     * grabbed the mutexen.
     */
    istat = dmfe_chip_get32(dmfep, STATUS_REG);
    if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0) {
        mutex_exit(dmfep->oplock);
        return (DDI_INTR_UNCLAIMED);
    }

    DRV_KS_INC(dmfep, KS_INTERRUPT);

    /*
     * Identify bits that represent enabled interrupts ...
     */
    istat |= dmfe_chip_get32(dmfep, STATUS_REG);
    interrupts = istat & dmfep->imask;
    ASSERT(interrupts != 0);

    DTRACE_PROBE1(intr, uint32_t, istat);

    /*
     * Check for any interrupts other than TX/RX done.
     * If there are any, they are considered Abnormal
     * and will cause the chip to be reset.
     */
    if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
        if (istat & ABNORMAL_SUMMARY_INT) {
            /*
             * Any Abnormal interrupts will lead to us
             * resetting the chip, so we don't bother
             * to clear each interrupt individually.
             *
             * Our main task here is to identify the problem,
             * by pointing out the most significant unexpected
             * bit.  Additional bits may well be consequences
             * of the first problem, so we consider the possible
             * causes in order of severity.
             */
            if (interrupts & SYSTEM_ERR_INT) {
                switch (istat & SYSTEM_ERR_BITS) {
                case SYSTEM_ERR_M_ABORT:
                    msg = "Bus Master Abort";
                    break;

                case SYSTEM_ERR_T_ABORT:
                    msg = "Bus Target Abort";
                    break;

                case SYSTEM_ERR_PARITY:
                    msg = "Parity Error";
                    break;

                default:
                    msg = "Unknown System Bus Error";
                    break;
                }
            } else if (interrupts & RX_STOPPED_INT) {
                msg = "RX process stopped";
            } else if (interrupts & RX_UNAVAIL_INT) {
                msg = "RX buffer unavailable";
                warning_msg = B_FALSE;
            } else if (interrupts & RX_WATCHDOG_INT) {
                msg = "RX watchdog timeout?";
            } else if (interrupts & RX_EARLY_INT) {
                msg = "RX early interrupt?";
            } else if (interrupts & TX_STOPPED_INT) {
                msg = "TX process stopped";
            } else if (interrupts & TX_JABBER_INT) {
                msg = "TX jabber timeout";
            } else if (interrupts & TX_UNDERFLOW_INT) {
                msg = "TX underflow?";
            } else if (interrupts & TX_EARLY_INT) {
                msg = "TX early interrupt?";
            } else if (interrupts & LINK_STATUS_INT) {
                msg = "Link status change?";
            } else if (interrupts & GP_TIMER_INT) {
                msg = "Timer expired?";
            }

            if (warning_msg)
                dmfe_warning(dmfep, "abnormal interrupt, "
                    "status 0x%x: %s", istat, msg);

            /*
             * We don't want to run the entire reinitialisation
             * code out of this (high-level?) interrupt, so we
             * simply STOP the chip, and wake up the factotum
             * to reinitialise it ...
             */
            dmfe_stop_chip(dmfep, CHIP_ERROR);
            dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
                "interrupt (error)");
        } else {
            /*
             * We shouldn't really get here (it would mean
             * there were some unprocessed enabled bits but
             * they weren't Abnormal?), but we'll check just
             * in case ...
             */
            DTRACE_PROBE1(intr__unexpected, uint32_t, istat);
        }
    }

    /*
     * Acknowledge all the original bits - except in the case of an
     * error, when we leave them unacknowledged so that the recovery
     * code can see what was going on when the problem occurred ...
     */
    if (dmfep->chip_state != CHIP_ERROR) {
        (void) dmfe_chip_put32(dmfep, STATUS_REG, istat);
        /*
         * Read-after-write forces completion on PCI bus.
         */
        (void) dmfe_chip_get32(dmfep, STATUS_REG);
    }

    /*
     * We've finished talking to the chip, so we can drop <oplock>
     * before handling the normal interrupts, which only involve
     * manipulation of descriptors ...
     */
    mutex_exit(dmfep->oplock);

    if (interrupts & RX_PKTDONE_INT)
        if ((mp = dmfe_getp(dmfep)) != NULL)
            mac_rx(dmfep->mh, NULL, mp);

    if (interrupts & TX_PKTDONE_INT) {
        /*
         * The only reason for taking this interrupt is to give
         * MAC a chance to schedule queued packets after a
         * ring-full condition.  To minimise the number of
         * redundant TX-Done interrupts, we only mark two of the
         * ring descriptors as 'interrupt-on-complete' - all the
         * others are simply handed back without an interrupt.
         */
        if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) {
            (void) dmfe_reclaim_tx_desc(dmfep);
            mutex_exit(dmfep->txlock);
        }
        mac_tx_update(dmfep->mh);
    }

    return (DDI_INTR_CLAIMED);
}
/*
 * ========== Statistics update handler ==========
 */
static int
dmfe_m_stat(void *arg, uint_t stat, uint64_t *val)
{
    dmfe_t *dmfep = arg;
    int rv = 0;

    /* Let MII handle its own stats. */
    if (mii_m_getstat(dmfep->mii, stat, val) == 0) {
        return (0);
    }

    mutex_enter(dmfep->oplock);
    mutex_enter(dmfep->rxlock);
    mutex_enter(dmfep->txlock);

    /* make sure we have all the stats collected */
    (void) dmfe_reclaim_tx_desc(dmfep);

    switch (stat) {
    case MAC_STAT_IPACKETS:
        *val = dmfep->rx_stats_ipackets;
        break;

    case MAC_STAT_MULTIRCV:
        *val = dmfep->rx_stats_multi;
        break;

    case MAC_STAT_BRDCSTRCV:
        *val = dmfep->rx_stats_bcast;
        break;

    case MAC_STAT_RBYTES:
        *val = dmfep->rx_stats_rbytes;
        break;

    case MAC_STAT_IERRORS:
        *val = dmfep->rx_stats_ierrors;
        break;

    case MAC_STAT_NORCVBUF:
        *val = dmfep->rx_stats_norcvbuf;
        break;

    case MAC_STAT_COLLISIONS:
        *val = dmfep->tx_stats_collisions;
        break;

    case MAC_STAT_OERRORS:
        *val = dmfep->tx_stats_oerrors;
        break;

    case MAC_STAT_OPACKETS:
        *val = dmfep->tx_stats_opackets;
        break;

    case MAC_STAT_MULTIXMT:
        *val = dmfep->tx_stats_multi;
        break;

    case MAC_STAT_BRDCSTXMT:
        *val = dmfep->tx_stats_bcast;
        break;

    case MAC_STAT_OBYTES:
        *val = dmfep->tx_stats_obytes;
        break;

    case MAC_STAT_OVERFLOWS:
        *val = dmfep->rx_stats_overflow;
        break;

    case MAC_STAT_UNDERFLOWS:
        *val = dmfep->tx_stats_underflow;
        break;

    case ETHER_STAT_ALIGN_ERRORS:
        *val = dmfep->rx_stats_align;
        break;

    case ETHER_STAT_FCS_ERRORS:
        *val = dmfep->rx_stats_fcs;
        break;

    case ETHER_STAT_TOOLONG_ERRORS:
        *val = dmfep->rx_stats_toolong;
        break;

    case ETHER_STAT_TOOSHORT_ERRORS:
        *val = dmfep->rx_stats_short;
        break;

    case ETHER_STAT_MACRCV_ERRORS:
        *val = dmfep->rx_stats_macrcv_errors;
        break;

    case ETHER_STAT_MACXMT_ERRORS:
        *val = dmfep->tx_stats_macxmt_errors;
        break;

    case ETHER_STAT_JABBER_ERRORS:
        *val = dmfep->tx_stats_jabber;
        break;

    case ETHER_STAT_CARRIER_ERRORS:
        *val = dmfep->tx_stats_nocarrier;
        break;

    case ETHER_STAT_TX_LATE_COLLISIONS:
        *val = dmfep->tx_stats_xmtlatecoll;
        break;

    case ETHER_STAT_EX_COLLISIONS:
        *val = dmfep->tx_stats_excoll;
        break;

    case ETHER_STAT_DEFER_XMTS:
        *val = dmfep->tx_stats_defer;
        break;

    case ETHER_STAT_FIRST_COLLISIONS:
        *val = dmfep->tx_stats_first_coll;
        break;

    case ETHER_STAT_MULTI_COLLISIONS:
        *val = dmfep->tx_stats_multi_coll;
        break;

    default:
        rv = ENOTSUP;
        break;
    }

    mutex_exit(dmfep->txlock);
    mutex_exit(dmfep->rxlock);
    mutex_exit(dmfep->oplock);

    return (rv);
}
/*
 * ========== Ioctl handler & subfunctions ==========
 */

static lb_property_t dmfe_loopmodes[] = {
    { normal,    "normal",    0 },
    { internal,  "Internal",  1 },
    { external,  "External",  2 },
};

/*
 * Specific dmfe IOCTLs, the mac module handles the generic ones.
 * Unfortunately, the DM9102 doesn't seem to work well with MII based
 * loopback, so we have to do something special for it.
 */
static void
dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
    dmfe_t *dmfep = arg;
    struct iocblk *iocp;
    int rv = 0;
    lb_info_sz_t sz;
    uint32_t mode;
    int cmd;

    iocp = (void *)mp->b_rptr;
    cmd = iocp->ioc_cmd;

    if (mp->b_cont == NULL) {
        /*
         * All of these ioctls need data!
         */
        miocnak(wq, mp, 0, EINVAL);
        return;
    }

    switch (cmd) {
    case LB_GET_INFO_SIZE:
        if (iocp->ioc_count != sizeof (sz)) {
            rv = EINVAL;
        } else {
            sz = sizeof (dmfe_loopmodes);
            bcopy(&sz, mp->b_cont->b_rptr, sizeof (sz));
        }
        break;

    case LB_GET_INFO:
        if (iocp->ioc_count != sizeof (dmfe_loopmodes)) {
            rv = EINVAL;
        } else {
            bcopy(dmfe_loopmodes, mp->b_cont->b_rptr,
                sizeof (dmfe_loopmodes));
        }
        break;

    case LB_GET_MODE:
        if (iocp->ioc_count != sizeof (mode)) {
            rv = EINVAL;
            break;
        }
        mutex_enter(dmfep->oplock);
        switch (dmfep->opmode & LOOPBACK_MODE_MASK) {
        case LOOPBACK_PHY_D:
            mode = 2;
            break;
        case LOOPBACK_INTERNAL:
            mode = 1;
            break;
        default:
            mode = 0;
            break;
        }
        mutex_exit(dmfep->oplock);
        bcopy(&mode, mp->b_cont->b_rptr, sizeof (mode));
        break;

    case LB_SET_MODE:
        rv = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
        if (rv != 0)
            break;
        if (iocp->ioc_count != sizeof (mode)) {
            rv = EINVAL;
            break;
        }
        bcopy(mp->b_cont->b_rptr, &mode, sizeof (mode));

        mutex_enter(dmfep->oplock);
        dmfep->opmode &= ~LOOPBACK_MODE_MASK;
        switch (mode) {
        case 2:
            dmfep->opmode |= LOOPBACK_PHY_D;
            break;
        case 1:
            dmfep->opmode |= LOOPBACK_INTERNAL;
            break;
        default:
            break;
        }
        if (!dmfep->suspended) {
            dmfe_restart(dmfep);
        }
        mutex_exit(dmfep->oplock);
        break;

    default:
        rv = EINVAL;
        break;
    }

    if (rv == 0) {
        miocack(wq, mp, iocp->ioc_count, 0);
    } else {
        miocnak(wq, mp, 0, rv);
    }
}
static int
dmfe_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    void *val)
{
    dmfe_t *dmfep = arg;

    return (mii_m_getprop(dmfep->mii, name, num, sz, val));
}

static int
dmfe_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    const void *val)
{
    dmfe_t *dmfep = arg;

    return (mii_m_setprop(dmfep->mii, name, num, sz, val));
}

static void
dmfe_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
    mac_prop_info_handle_t mph)
{
    dmfe_t *dmfep = arg;

    mii_m_propinfo(dmfep->mii, name, num, mph);
}
/*
 * ========== Per-instance setup/teardown code ==========
 */

/*
 * Determine local MAC address & broadcast address for this interface
 */
static void
dmfe_find_mac_address(dmfe_t *dmfep)
{
    uchar_t *prop;
    uint_t propsize;
    int err;

    /*
     * We have to find the "vendor's factory-set address".  This is
     * the value of the property "local-mac-address", as set by OBP
     * (or a .conf file!)
     *
     * If the property is not there, then we try to find the factory
     * mac address from the device's serial EEPROM.
     */
    bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
    err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
        DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
    if (err == DDI_PROP_SUCCESS) {
        if (propsize == ETHERADDRL)
            ethaddr_copy(prop, dmfep->curr_addr);
        ddi_prop_free(prop);
    } else {
        /* no property set... check eeprom */
        dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
            ETHERADDRL);
    }
}
static int
dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
    size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	ddi_dma_cookie_t dma_cookie;
	uint_t ncookies;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "DMA handle allocation failed");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
	    attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL,
	    &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "DMA memory allocation failed: %d", err);
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    dma_p->mem_va, dma_p->alength, dma_flags,
	    DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
	if (err != DDI_DMA_MAPPED) {
		dmfe_error(dmfep, "DMA mapping failed: %d", err);
		return (DDI_FAILURE);
	}
	if ((dma_p->ncookies = ncookies) != 1) {
		dmfe_error(dmfep, "Too many DMA cookies: %d", ncookies);
		return (DDI_FAILURE);
	}

	dma_p->mem_dvma = dma_cookie.dmac_address;
	if (setup > 0) {
		dma_p->setup_dvma = dma_p->mem_dvma + memsize;
		dma_p->setup_va = dma_p->mem_va + memsize;
	} else {
		dma_p->setup_dvma = 0;
		dma_p->setup_va = NULL;
	}

	return (DDI_SUCCESS);
}
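/*
 * Layout note (editorial, with illustrative numbers): one call to
 * dmfe_alloc_dma_mem() carves a single contiguous allocation into the
 * main area, an optional setup buffer, and the DMFE_SLOP guard that
 * absorbs the chip's reads beyond the end of a descriptor:
 *
 *	mem_va/mem_dvma                 setup_va/setup_dvma
 *	|<---------- memsize ---------->|<--- setup --->|<-- slop -->|
 *
 * For example, a 128-entry TX ring of 16-byte descriptors gives
 * memsize = 2048, so setup_va = mem_va + 2048 and setup_dvma =
 * mem_dvma + 2048; the 128 entries here are illustrative, not
 * necessarily the driver's actual default.
 */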
/*
 * This function allocates the transmit and receive buffers and descriptors.
 */
static int
dmfe_alloc_bufs(dmfe_t *dmfep)
{
	size_t memsize;
	int err;

	/*
	 * Allocate memory & handles for TX descriptor ring
	 */
	memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
	err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &dmfep->tx_desc);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "TX descriptor allocation failed");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for TX buffers
	 */
	memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
	    &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
	    &dmfep->tx_buff);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "TX buffer allocation failed");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for RX descriptor ring
	 */
	memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &dmfep->rx_desc);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "RX descriptor allocation failed");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for RX buffers
	 */
	memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
	    &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "RX buffer allocation failed");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate bitmasks for tx packet type tracking
	 */
	dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
	dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);

	return (DDI_SUCCESS);
}
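/*
 * Editorial sketch: tx_mcast and tx_bcast above are one-bit-per-
 * descriptor maps, hence the n_desc / NBBY sizing (NBBY is bits per
 * byte, from <sys/param.h>).  The idiom in miniature, assuming n_desc
 * is a multiple of NBBY:
 *
 *	uint8_t *map = kmem_zalloc(n_desc / NBBY, KM_SLEEP);
 *
 *	map[i / NBBY] |= (1 << (i % NBBY));		mark slot i
 *	if (map[i / NBBY] & (1 << (i % NBBY)))		test slot i
 *		...
 *	map[i / NBBY] &= ~(1 << (i % NBBY));		clear slot i
 */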
static void
dmfe_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->mem_dvma = 0;
		dma_p->setup_dvma = 0;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->mem_va = NULL;
		dma_p->setup_va = NULL;
	}
}
/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 */
static void
dmfe_free_bufs(dmfe_t *dmfep)
{
	dmfe_free_dma_mem(&dmfep->rx_buff);
	dmfe_free_dma_mem(&dmfep->rx_desc);
	dmfe_free_dma_mem(&dmfep->tx_buff);
	dmfe_free_dma_mem(&dmfep->tx_desc);
	if (dmfep->tx_mcast)
		kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
	if (dmfep->tx_bcast)
		kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
}
static void
dmfe_unattach(dmfe_t *dmfep)
{
	/*
	 * Clean up and free all DMFE data structures
	 */
	if (dmfep->cycid != NULL) {
		ddi_periodic_delete(dmfep->cycid);
		dmfep->cycid = NULL;
	}

	if (dmfep->ksp_drv != NULL)
		kstat_delete(dmfep->ksp_drv);
	if (dmfep->progress & PROGRESS_HWINT) {
		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
	}
	if (dmfep->progress & PROGRESS_SOFTINT)
		ddi_remove_softintr(dmfep->factotum_id);
	if (dmfep->mii != NULL)
		mii_free(dmfep->mii);
	if (dmfep->progress & PROGRESS_MUTEX) {
		mutex_destroy(dmfep->txlock);
		mutex_destroy(dmfep->rxlock);
		mutex_destroy(dmfep->oplock);
	}
	dmfe_free_bufs(dmfep);
	if (dmfep->io_handle != NULL)
		ddi_regs_map_free(&dmfep->io_handle);

	kmem_free(dmfep, sizeof (*dmfep));
}
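/*
 * Editorial note: dmfe_unattach() doubles as the attach-failure unwind
 * path, keyed by the PROGRESS_* bits that dmfe_attach() sets as each
 * resource comes online.  The idiom in miniature (PROGRESS_A and the
 * step functions are hypothetical):
 *
 *	if (attach_step_a() == DDI_SUCCESS)
 *		dmfep->progress |= PROGRESS_A;
 *	...
 *	if (dmfep->progress & PROGRESS_A)
 *		undo_step_a();
 *
 * Resources without a flag (cycid, ksp_drv, mii, io_handle) are
 * NULL-checked instead, which is safe because the soft state was
 * allocated with kmem_zalloc().
 */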
static int
dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
{
	ddi_acc_handle_t handle;
	uint32_t regval;

	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Get vendor/device/revision. We expect (but don't check) that
	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
	 */
	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);

	/*
	 * Turn on Bus Master Enable bit and ensure the device is not asleep
	 */
	regval = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));

	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));

	pci_config_teardown(&handle);
	return (DDI_SUCCESS);
}
static const struct ks_index ks_drv_names[] = {
	{ KS_INTERRUPT,			"intr" },
	{ KS_CYCLIC_RUN,		"cyclic_run" },

	{ KS_TX_STALL,			"tx_stall_detect" },
	{ KS_CHIP_ERROR,		"chip_error_interrupt" },

	{ KS_FACTOTUM_RUN,		"factotum_run" },
	{ KS_RECOVERY,			"factotum_recover" },

	{ -1,				NULL }
};
static void
dmfe_init_kstats(dmfe_t *dmfep, int instance)
{
	kstat_t *ksp;
	kstat_named_t *knp;
	const struct ks_index *ksip;

	/* no need to create MII stats, the mac module already does it */

	/* Create and initialise driver-defined kstats */
	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
	    KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
	if (ksp != NULL) {
		for (knp = ksp->ks_data, ksip = ks_drv_names;
		    ksip->name != NULL; ++ksip) {
			kstat_named_init(&knp[ksip->index], ksip->name,
			    KSTAT_DATA_UINT64);
		}
		dmfep->ksp_drv = ksp;
		dmfep->knp_drv = knp;
		kstat_install(ksp);
	} else {
		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
	}
}
/*
 * dmfe_resume() -- resume transmit/receive after powerdown
 */
static int
dmfe_resume(dev_info_t *devinfo)
{
	dmfe_t *dmfep;				/* Our private data	*/
	chip_id_t chipid;
	boolean_t restart = B_FALSE;

	dmfep = ddi_get_driver_private(devinfo);
	if (dmfep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (dmfep->devinfo != devinfo)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the chip's changed its identity (*boggle*)
	 */
	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (chipid.vendor != dmfep->chipid.vendor)
		return (DDI_FAILURE);
	if (chipid.device != dmfep->chipid.device)
		return (DDI_FAILURE);
	if (chipid.revision != dmfep->chipid.revision)
		return (DDI_FAILURE);

	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->txlock);
	dmfep->suspended = B_FALSE;
	mutex_exit(dmfep->txlock);

	/*
	 * All OK, reinitialise h/w & kick off MAC scheduling
	 */
	if (dmfep->mac_state == DMFE_MAC_STARTED) {
		dmfe_restart(dmfep);
		restart = B_TRUE;
	}
	mutex_exit(dmfep->oplock);

	if (restart) {
		mii_resume(dmfep->mii);
		mac_tx_update(dmfep->mh);
	}
	return (DDI_SUCCESS);
}
/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	dmfe_t *dmfep;				/* Our private data	*/
	uint32_t csr6;
	int instance;
	int err;

	instance = ddi_get_instance(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (dmfe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
	ddi_set_driver_private(devinfo, dmfep);
	dmfep->devinfo = devinfo;
	dmfep->dmfe_guard = DMFE_GUARD;

	/*
	 * Initialize more fields in DMFE private data
	 * Determine the local MAC address
	 */
#if DMFEDEBUG
	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
	    debug_propname, dmfe_debug);
#endif /* DMFEDEBUG */
	dmfep->cycid = NULL;
	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
	    instance);

	/*
	 * Check for custom "opmode-reg-value" property;
	 * if none, use the defaults below for CSR6 ...
	 */
	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, opmode_propname, csr6);

	/*
	 * Read chip ID & set up config space command register(s)
	 */
	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
		dmfe_error(dmfep, "dmfe_config_init() failed");
		goto attach_fail;
	}

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}

	/*
	 * Get our MAC address.
	 */
	dmfe_find_mac_address(dmfep);

	/*
	 * Allocate the TX and RX descriptors/buffers.
	 */
	dmfep->tx.n_desc = dmfe_tx_desc;
	dmfep->rx.n_desc = dmfe_rx_desc;
	err = dmfe_alloc_bufs(dmfep);
	if (err != DDI_SUCCESS)
		goto attach_fail;

	/*
	 * Add the softint handler
	 */
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_SOFTINT;

	/*
	 * Add the h/w interrupt handler & initialise mutexen
	 */
	if (ddi_get_iblock_cookie(devinfo, 0, &dmfep->iblk) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_get_iblock_cookie() failed");
		goto attach_fail;
	}
	mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	dmfep->progress |= PROGRESS_MUTEX;

	if (ddi_add_intr(devinfo, 0, NULL, NULL,
	    dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_intr() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_HWINT;

	/*
	 * Create & initialise named kstats
	 */
	dmfe_init_kstats(dmfep, instance);

	/*
	 * Reset & initialise the chip and the ring buffers
	 * Initialise the (internal) PHY
	 */
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	dmfe_reset(dmfep);

	/*
	 * Prepare the setup packet
	 */
	bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
	bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
	dmfep->addr_set = B_FALSE;
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	dmfep->mac_state = DMFE_MAC_RESET;

	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	mutex_exit(dmfep->oplock);

	if (dmfe_init_phy(dmfep) != B_TRUE)
		goto attach_fail;

	/*
	 * Send a reasonable setup frame.  This configures our starting
	 * address and the broadcast address.
	 */
	(void) dmfe_m_unicst(dmfep, dmfep->curr_addr);

	/*
	 * Initialize pointers to device specific functions which
	 * will be used by the generic layer.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dmfep;
	macp->m_dip = devinfo;
	macp->m_src_addr = dmfep->curr_addr;
	macp->m_callbacks = &dmfe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &dmfep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;
	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

	/*
	 * Install the cyclic callback that we use to check for link
	 * status, transmit stall, etc. The cyclic callback (dmfe_cyclic())
	 * is invoked in kernel context then.
	 */
	ASSERT(dmfep->cycid == NULL);
	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
	    dmfe_tick_us * 1000, DDI_IPL_0);
	return (DDI_SUCCESS);

attach_fail:
	dmfe_unattach(dmfep);
	return (DDI_FAILURE);
}
/*
 * dmfe_suspend() -- suspend transmit/receive for powerdown
 */
static int
dmfe_suspend(dmfe_t *dmfep)
{
	/*
	 * Just stop processing ...
	 */
	mii_suspend(dmfep->mii);
	mutex_enter(dmfep->oplock);
	dmfe_stop(dmfep);

	mutex_enter(dmfep->txlock);
	dmfep->suspended = B_TRUE;
	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->oplock);

	return (DDI_SUCCESS);
}
/*
 * detach(9E) -- Detach a device from the system
 */
static int
dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	dmfe_t *dmfep;

	dmfep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (dmfe_suspend(dmfep));

	case DDI_DETACH:
		break;
	}

	/*
	 * Unregister from the MAC subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(dmfep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	dmfe_unattach(dmfep);
	return (DDI_SUCCESS);
}
/*
 * ========== Module Loading Data & Entry Points ==========
 */

DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach,
    nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);

static struct modldrv dmfe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	dmfe_ident,		/* short description */
	&dmfe_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&dmfe_modldrv, NULL
};

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
int
_init(void)
{
	uint32_t tmp100;
	uint32_t tmp10;
	int i;
	int status;

	/* Calculate global timing parameters */
	tmp100 = (dmfe_tx100_stall_us + dmfe_tick_us - 1) / dmfe_tick_us;
	tmp10 = (dmfe_tx10_stall_us + dmfe_tick_us - 1) / dmfe_tick_us;

	for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
		switch (i) {
		case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA):
		case TX_PROCESS_STATE(TX_PROCESS_WAIT_END):
			/*
			 * The chip doesn't spontaneously recover from
			 * a stall in these states, so we reset early
			 */
			stall_100_tix[i] = tmp100;
			stall_10_tix[i] = tmp10;
			break;

		case TX_PROCESS_STATE(TX_PROCESS_SUSPEND):
		default:
			/*
			 * The chip has been seen to spontaneously recover
			 * after an apparent stall in the SUSPEND state,
			 * so we'll allow it rather longer to do so.  As
			 * stalls in other states have not been observed,
			 * we'll use long timeouts for them too ...
			 */
			stall_100_tix[i] = tmp100 * 20;
			stall_10_tix[i] = tmp10 * 20;
			break;
		}
	}
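	/*
	 * Editorial note: the conversions above are the usual round-up
	 * integer division, ticks = ceil(stall_us / tick_us), written as
	 * (stall_us + tick_us - 1) / tick_us.  With illustrative values
	 * of dmfe_tick_us = 25000 and a stall time of 60000us:
	 *
	 *	(60000 + 25000 - 1) / 25000  ==  84999 / 25000  ==  3 ticks
	 *
	 * where plain truncating division would give only 2.  The SUSPEND
	 * and default entries are then scaled by 20 to give the chip time
	 * to recover on its own before we force a reset.
	 */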
	mac_init_ops(&dmfe_dev_ops, "dmfe");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		dmfe_log_init();

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&dmfe_dev_ops);