/* $NetBSD: i82596.c,v 1.26 2009/05/10 04:36:58 tsutsui Exp $ */

/*
 * Copyright (c) 2003 Jochen Kunz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Jochen Kunz may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JOCHEN KUNZ
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Driver for the Intel i82596CA and i82596DX/SX 10MBit/s Ethernet chips.
 *
 * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old i82586
 * ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the i82596 to
 * i82586 compatibility mode.
 *
 * Documentation about these chips can be found at
 *
 *	http://developer.intel.com/design/network/datashts/290218.htm
 *	http://developer.intel.com/design/network/datashts/290219.htm
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.26 2009/05/10 04:36:58 tsutsui Exp $");

/* autoconfig and device stuff */
#include <sys/param.h>
#include <sys/device.h>

/* bus_space / bus_dma etc. */

/* general system data and functions */
#include <sys/systm.h>
#include <sys/ioctl.h>

/* tsleep / sleep / wakeup */
#include <sys/kernel.h>

#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <sys/socket.h>

#include <dev/ic/i82596reg.h>
#include <dev/ic/i82596var.h>
/* Supported chip variants */
const char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };

/* media change and status callback */
static int iee_mediachange(struct ifnet *);
static void iee_mediastatus(struct ifnet *, struct ifmediareq *);

/* interface routines to upper protocols */
static void iee_start(struct ifnet *);			/* initiate output */
static int iee_ioctl(struct ifnet *, u_long, void *);	/* ioctl routine */
static int iee_init(struct ifnet *);			/* init routine */
static void iee_stop(struct ifnet *, int);		/* stop routine */
static void iee_watchdog(struct ifnet *);		/* timer routine */

/* internal helper functions */
static void iee_cb_setup(struct iee_softc *, uint32_t);
/*
 * Things a MD frontend has to provide:
 *
 * The functions via function pointers in the softc:
 *	int (*sc_iee_cmd)(struct iee_softc *sc, uint32_t cmd);
 *	int (*sc_iee_reset)(struct iee_softc *sc);
 *	void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
 *	int (*sc_mediachange)(struct ifnet *);
 *
 * sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
 *	to the SCP cmd word and issuing a Channel Attention.
 * sc_iee_reset(): initiate a reset, supply the address of the SCP to the
 *	chip, wait for the chip to initialize and ACK interrupts that
 *	this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
 * These functions must carefully bus_dmamap_sync() all data they have
 * touched!
 *
 * sc_mediastatus() and sc_mediachange() are just MD hooks to the
 * corresponding MI functions. The MD frontend may set these pointers to
 * NULL when they are not needed.
 *
 * sc->sc_type has to be set to I82596_UNKNOWN or I82596_DX or I82596_CA.
 * This is for printing out the correct chip type at attach time only. The
 * MI backend doesn't distinguish different chip types when programming
 * the chip.
 *
 * IEE_NEED_SWAP in sc->sc_flags has to be cleared on little endian hardware
 * and set on big endian hardware, when endianness conversion is not done
 * by the bus attachment but done by the i82596 chip itself.
 * Usually you need to set IEE_NEED_SWAP on big endian machines
 * where the hardware (the LE/~BE pin) is configured as BE mode.
 *
 * If the chip is configured as BE mode, all 8 bit (byte) and 16 bit (word)
 * entities can be written in big endian. But the Rev A chip doesn't support
 * 32 bit (dword) entities with big endian byte ordering, so we have to
 * treat all 32 bit (dword) entities as two 16 bit big endian entities.
 * Rev B and C chips support big endian byte ordering for 32 bit entities,
 * and this new feature is enabled by IEE_SYSBUS_BE in the sysbus byte.
 *
 * With the IEE_SYSBUS_BE feature, all 32 bit address pointers are
 * treated as true 32 bit entities but the SCB absolute address and
 * statistical counters are still treated as two 16 bit big endian entities,
 * so we have to always swap high and low words for these entities.
 * IEE_SWAP32() should be used for the SCB address and statistical counters,
 * and IEE_SWAPA32() should be used for other 32 bit pointers in the shmem.
 *
 * The IEE_REV_A flag must be set in sc->sc_flags if the IEE_SYSBUS_BE
 * feature is disabled even on big endian machines for the old Rev A chip
 * in the backend.
 *
 * sc->sc_cl_align must be set to 1 or to the cache line size. When set to
 * 1 no special alignment of DMA descriptors is done. If sc->sc_cl_align != 1
 * it forces alignment of the data structures in the shared memory to a
 * multiple of sc->sc_cl_align. This is needed on archs like hp700 that have
 * non DMA I/O coherent caches and are unable to map the shared memory
 * uncachable. (At least pre PA7100LC CPUs are unable to map memory
 * uncachable.)
 *
 * The MD frontend also has to set sc->sc_cl_align and sc->sc_sysbus
 * before the MI iee_attach() allocates and sets up the shared DMA memory.
 * All communication with the chip is done via this shared memory.
 * This memory is mapped with BUS_DMA_COHERENT so it will be uncached
 * if possible for archs with non DMA I/O coherent caches.
 * The base of the memory needs to be aligned to an even address
 * if sc->sc_cl_align == 1 and aligned to a cache line if sc->sc_cl_align != 1.
 * The descriptor offsets are calculated in iee_attach() to handle this.
 *
 * An interrupt with iee_intr() as handler must be established.
 *
 * Call void iee_attach(struct iee_softc *sc, uint8_t *ether_address,
 * int *media, int nmedia, int defmedia); when everything is set up. First
 * parameter is a pointer to the MI softc, ether_address is an array that
 * contains the ethernet address. media is an array of the media types
 * provided by the hardware. The members of this array are supplied to
 * ifmedia_add() in sequence. nmedia is the count of elements in media.
 * defmedia is the default media that is set via ifmedia_set().
 * nmedia and defmedia are ignored when media == NULL.
 *
 * The MD backend may call iee_detach() to detach the device.
 *
 * See sys/arch/hp700/gsc/if_iee_gsc.c for an example.
 */
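
/*
 * As an illustration, a minimal MD frontend attach could look like the
 * hypothetical sketch below.  The mybus_*() names, the "aux" plumbing and
 * "enaddr" are made up for this example; only the softc members,
 * iee_attach() and iee_intr() used here belong to the real MI interface.
 *
 *	void
 *	mybus_iee_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct iee_softc *sc = device_private(self);
 *
 *		sc->sc_dev = self;
 *		sc->sc_dmat = mybus_dma_tag(aux);
 *		sc->sc_iee_cmd = mybus_iee_cmd;		(writes the cmd word,
 *		sc->sc_iee_reset = mybus_iee_reset;	 issues Channel Attention)
 *		sc->sc_mediastatus = NULL;		(use the MI defaults)
 *		sc->sc_mediachange = NULL;
 *		sc->sc_type = I82596_CA;
 *		sc->sc_flags = 0;			(or IEE_NEED_SWAP / IEE_REV_A)
 *		sc->sc_cl_align = 1;			(or the cache line size)
 *		sc->sc_sysbus = 0;			(plus IEE_SYSBUS_BE if usable)
 *
 *		mybus_intr_establish(aux, iee_intr, sc);
 *		iee_attach(sc, enaddr, NULL, 0, 0);
 *	}
 */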
/*
 * How frame reception is done:
 *
 * Each Receive Frame Descriptor has one associated Receive Buffer Descriptor.
 * Each RBD points to the data area of an mbuf cluster. The RFDs are linked
 * together in a circular list. sc->sc_rx_done is the count of RFDs in the
 * list already processed / the number of the RFD that has to be checked for
 * a new frame first at the next RX interrupt. Upon successful reception of
 * a frame the mbuf cluster is handed to upper protocol layers, a new mbuf
 * cluster is allocated and the RFD / RBD are reinitialized accordingly.
 *
 * When an RFD list overrun occurs, the whole RFD and RBD lists are
 * reinitialized and frame reception is started again.
 */
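
/*
 * In rough pseudo code (a simplified sketch of iee_intr() below, not a
 * literal excerpt), one RX interrupt is handled like this:
 *
 *	while (RFD[sc->sc_rx_done] is marked complete) {
 *		hand the attached mbuf cluster to (*ifp->if_input)();
 *		allocate a fresh cluster and load it into the RX DMA map;
 *		reinitialize the RFD / RBD, making this RBD the end of list;
 *		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
 *	}
 */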
iee_intr(void *intarg)
{
	struct iee_softc *sc = intarg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *rx_mbuf;
	struct mbuf *new_mbuf;
	uint16_t status, count, cmd;
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	IEE_SCBSYNC(sc, BUS_DMASYNC_POSTREAD);
	scb_status = SC_SCB(sc)->scb_status;
	scb_cmd = SC_SCB(sc)->scb_cmd;
	rfd = SC_RFD(sc, sc->sc_rx_done);
	IEE_RFDSYNC(sc, sc->sc_rx_done,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	status = rfd->rfd_status;
	if ((status & IEE_RFD_C) == 0) {
		IEE_RFDSYNC(sc, sc->sc_rx_done, BUS_DMASYNC_PREREAD);
	IEE_RFDSYNC(sc, sc->sc_rx_done,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	/* At least one packet was received. */
	rx_map = sc->sc_rx_map[sc->sc_rx_done];
	rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
	IEE_RBDSYNC(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	SC_RBD(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
	    &= ~IEE_RBD_EL;
	IEE_RBDSYNC(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	rbd = SC_RBD(sc, sc->sc_rx_done);
	IEE_RBDSYNC(sc, sc->sc_rx_done,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	count = rbd->rbd_count;
	if ((status & IEE_RFD_OK) == 0
	    || (count & IEE_RBD_EOF) == 0
	    || (count & IEE_RBD_F) == 0) {
		/* Receive error, skip frame and reuse buffer. */
		rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
		IEE_RBDSYNC(sc, sc->sc_rx_done,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		printf("%s: iee_intr: receive error %d, rfd_status="
		    "0x%.4x, rfd_count=0x%.4x\n",
		    device_xname(sc->sc_dev),
		    ++sc->sc_rx_err, status, count);
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
	bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
	    count & IEE_RBD_COUNT;
	rx_mbuf->m_pkthdr.rcvif = ifp;
	MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
	if (new_mbuf == NULL) {
		printf("%s: iee_intr: can't allocate mbuf\n",
		    device_xname(sc->sc_dev));
	MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
	MCLGET(new_mbuf, M_DONTWAIT);
	if ((new_mbuf->m_flags & M_EXT) == 0) {
		printf("%s: iee_intr: can't alloc mbuf cluster\n",
		    device_xname(sc->sc_dev));
	bus_dmamap_unload(sc->sc_dmat, rx_map);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES - 2;
	new_mbuf->m_data += 2;
	if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_map,
	    new_mbuf, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
		panic("%s: iee_intr: can't load RX DMA map\n",
		    device_xname(sc->sc_dev));
	bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
	    rx_map->dm_mapsize, BUS_DMASYNC_PREREAD);
#if NBPFILTER > 0
	if (ifp->if_bpf != 0)
		bpf_mtap(ifp->if_bpf, rx_mbuf);
#endif /* NBPFILTER > 0 */
	(*ifp->if_input)(ifp, rx_mbuf);
	sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
	rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
	rbd->rbd_rb_addr = IEE_SWAPA32(rx_map->dm_segs[0].ds_addr);
	IEE_RBDSYNC(sc, sc->sc_rx_done,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
	if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
		/* Receive Overrun, reinit receive ring buffer. */
		for (n = 0 ; n < IEE_NRFD ; n++) {
			rfd = SC_RFD(sc, n);
			rbd = SC_RBD(sc, n);
			rfd->rfd_cmd = IEE_RFD_SF;
			rfd->rfd_link_addr =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off
			    + sc->sc_rfd_sz * ((n + 1) % IEE_NRFD)));
			rbd->rbd_next_rbd =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off
			    + sc->sc_rbd_sz * ((n + 1) % IEE_NRFD)));
			rbd->rbd_size = IEE_RBD_EL |
			    sc->sc_rx_map[n]->dm_segs[0].ds_len;
			rbd->rbd_rb_addr =
			    IEE_SWAPA32(sc->sc_rx_map[n]->dm_segs[0].ds_addr);
		}
		SC_RFD(sc, 0)->rfd_rbd_addr =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, sc->sc_rfd_off,
		    sc->sc_rfd_sz * IEE_NRFD + sc->sc_rbd_sz * IEE_NRFD,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		(sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
		printf("%s: iee_intr: receive ring buffer overrun\n",
		    device_xname(sc->sc_dev));
	if (sc->sc_next_cb != 0) {
		IEE_CBSYNC(sc, sc->sc_next_cb - 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		status = SC_CB(sc, sc->sc_next_cb - 1)->cb_status;
		IEE_CBSYNC(sc, sc->sc_next_cb - 1,
		    BUS_DMASYNC_PREREAD);
		if ((status & IEE_CB_C) != 0) {
			/* CMD list finished */
			if (sc->sc_next_tbd != 0) {
				/* A TX CMD list finished, cleanup */
				for (n = 0 ; n < sc->sc_next_cb ; n++) {
					m_freem(sc->sc_tx_mbuf[n]);
					sc->sc_tx_mbuf[n] = NULL;
					bus_dmamap_unload(sc->sc_dmat,
					    sc->sc_tx_map[n]);
					IEE_CBSYNC(sc, n,
					    BUS_DMASYNC_POSTREAD|
					    BUS_DMASYNC_POSTWRITE);
					status = SC_CB(sc, n)->cb_status;
					IEE_CBSYNC(sc, n,
					    BUS_DMASYNC_PREREAD);
					if ((status & IEE_CB_COL) != 0 &&
					    (status & IEE_CB_MAXCOL) == 0)
					sc->sc_tx_col += col;
					if ((status & IEE_CB_OK) != 0) {
						ifp->if_collisions += col;
				ifp->if_flags &= ~IFF_OACTIVE;
			for (n = 0 ; n < sc->sc_next_cb ; n++) {
				/*
				 * Check if a CMD failed, but ignore TX errors.
				 */
				IEE_CBSYNC(sc, n,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				cmd = SC_CB(sc, n)->cb_cmd;
				status = SC_CB(sc, n)->cb_status;
				IEE_CBSYNC(sc, n, BUS_DMASYNC_PREREAD);
				if ((cmd & IEE_CB_CMD) != IEE_CB_CMD_TR &&
				    (status & IEE_CB_OK) == 0)
					printf("%s: iee_intr: scb_status=0x%x "
					    "scb_cmd=0x%x failed command %d: "
					    "cb_status[%d]=0x%.4x "
					    "cb_cmd[%d]=0x%.4x\n",
					    device_xname(sc->sc_dev),
			if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
				iee_cb_setup(sc, IEE_CB_CMD_MCS |
				    IEE_CB_S | IEE_CB_EL | IEE_CB_I);
				(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
				/* Try to get deferred packets going. */
	if (IEE_SWAP32(SC_SCB(sc)->scb_crc_err) != sc->sc_crc_err) {
		sc->sc_crc_err = IEE_SWAP32(SC_SCB(sc)->scb_crc_err);
		printf("%s: iee_intr: crc_err=%d\n", device_xname(sc->sc_dev),
		    sc->sc_crc_err);
	if (IEE_SWAP32(SC_SCB(sc)->scb_align_err) != sc->sc_align_err) {
		sc->sc_align_err = IEE_SWAP32(SC_SCB(sc)->scb_align_err);
		printf("%s: iee_intr: align_err=%d\n", device_xname(sc->sc_dev),
		    sc->sc_align_err);
	if (IEE_SWAP32(SC_SCB(sc)->scb_resource_err) != sc->sc_resource_err) {
		sc->sc_resource_err = IEE_SWAP32(SC_SCB(sc)->scb_resource_err);
		printf("%s: iee_intr: resource_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_resource_err);
	if (IEE_SWAP32(SC_SCB(sc)->scb_overrun_err) != sc->sc_overrun_err) {
		sc->sc_overrun_err = IEE_SWAP32(SC_SCB(sc)->scb_overrun_err);
		printf("%s: iee_intr: overrun_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_overrun_err);
	if (IEE_SWAP32(SC_SCB(sc)->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
		sc->sc_rcvcdt_err = IEE_SWAP32(SC_SCB(sc)->scb_rcvcdt_err);
		printf("%s: iee_intr: rcvcdt_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_rcvcdt_err);
	if (IEE_SWAP32(SC_SCB(sc)->scb_short_fr_err) != sc->sc_short_fr_err) {
		sc->sc_short_fr_err = IEE_SWAP32(SC_SCB(sc)->scb_short_fr_err);
		printf("%s: iee_intr: short_fr_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_short_fr_err);
	IEE_SCBSYNC(sc, BUS_DMASYNC_PREREAD);
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
/*
 * How Command Block List Processing is done.
 *
 * A running CBL is never manipulated. If there is a CBL already running,
 * further CMDs are deferred until the current list is done. A new list is
 * set up when the old one has finished.
 * This eases programming. To manipulate a running CBL it is necessary to
 * suspend the Command Unit to avoid race conditions. After a suspend
 * is sent we have to wait for an interrupt that ACKs the suspend. Then
 * we can manipulate the CBL and resume operation. I am not sure that this
 * is more effective than the current, much simpler approach. => KISS
 * See i82596CA data sheet page 26.
 *
 * A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).
 *
 * A CBL may consist of TX CMDs, and _only_ TX CMDs.
 * A TX CBL is running or on the way to be set up when
 * ((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).
 *
 * A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
 * non-TX CMDs.
 *
 * This comes mostly from the way an Ethernet driver works and
 * because running CBLs are not manipulated when they are on the way. If
 * if_start() is called there will be TX CMDs enqueued so we have a running
 * CBL and other CMDs from e.g. if_ioctl() will be deferred and vice versa.
 *
 * The Multicast Setup Command is special. An MCS needs more space than
 * a single CB has. Actual space requirement depends on the length of the
 * multicast list. So we always defer MCS until other CBLs are finished,
 * then we set up a CONF CMD in the first CB. The CONF CMD is needed to
 * turn ALLMULTI on the hardware on or off. The MCS is the 2nd CB and may
 * use all the remaining space in the CBL and the Transmit Buffer Descriptor
 * List. (Therefore CBL and TBDL must be continuous in physical and virtual
 * memory. This is guaranteed through the definitions of the list offsets
 * in i82596reg.h and because it is only a single DMA segment used for all
 * lists.) When ALLMULTI is enabled via the CONF CMD, the MCS is run with
 * a multicast list length of 0, thus disabling the multicast filter.
 * A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0)
 */
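
/*
 * In pseudo code, the deferral described above (a sketch, not a literal
 * excerpt of the functions below):
 *
 *	iee_cb_setup(sc, IEE_CB_CMD_MCS | ...):
 *		if (sc->sc_next_cb != 0)		a CBL is already running,
 *			sc->sc_flags |= IEE_WANT_MCAST;	so defer the MCS
 *		else
 *			build the CONF CMD in CB 0 and the MCS in CB 1
 *
 *	iee_intr(), once the running CBL has finished:
 *		if ((sc->sc_flags & IEE_WANT_MCAST) != 0)
 *			iee_cb_setup(sc, IEE_CB_CMD_MCS | ...);	run it now
 *		else
 *			try to get deferred TX packets going via iee_start()
 */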
iee_cb_setup(struct iee_softc *sc, uint32_t cmd)
{
	struct iee_cb *cb = SC_CB(sc, sc->sc_next_cb);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;

	memset(cb, 0, sc->sc_cb_sz);
	switch (cmd & IEE_CB_CMD) {
	case IEE_CB_CMD_NOP:	/* NOP CMD */
	case IEE_CB_CMD_IAS:	/* Individual Address Setup */
		memcpy(__UNVOLATILE(cb->cb_ind_addr), CLLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN);
	case IEE_CB_CMD_CONF:	/* Configure */
		memcpy(__UNVOLATILE(cb->cb_cf), sc->sc_cf, sc->sc_cf[0]
	case IEE_CB_CMD_MCS:	/* Multicast Setup */
		if (sc->sc_next_cb != 0) {
			sc->sc_flags |= IEE_WANT_MCAST;
		sc->sc_flags &= ~IEE_WANT_MCAST;
		if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
			/* Need no multicast filter in promisc mode. */
			iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		/* Leave room for a CONF CMD to en/dis-able ALLMULTI mode */
		cb = SC_CB(sc, sc->sc_next_cb + 1);
		cb->cb_mcast.mc_size = 0;
		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
			    * ETHER_ADDR_LEN + 2 * sc->sc_cb_sz >
			    sc->sc_cb_sz * IEE_NCB +
			    sc->sc_tbd_sz * IEE_NTBD * IEE_NCB) {
				cb->cb_mcast.mc_size = 0;
			memcpy(__UNVOLATILE(&cb->cb_mcast.mc_addrs[
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN]),
			    enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
			cb->cb_mcast.mc_size++;
		if (cb->cb_mcast.mc_size == 0) {
			/* Can't do exact mcast filtering, do ALLMULTI mode. */
			ifp->if_flags |= IFF_ALLMULTI;
			sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
			/* disable ALLMULTI and load mcast list */
			ifp->if_flags &= ~IFF_ALLMULTI;
			sc->sc_cf[11] |= IEE_CF_11_MCALL;
			/* Mcast setup may need more than sc->sc_cb_sz bytes. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    sc->sc_cb_off,
			    sc->sc_cb_sz * IEE_NCB +
			    sc->sc_tbd_sz * IEE_NTBD * IEE_NCB,
			    BUS_DMASYNC_PREWRITE);
		iee_cb_setup(sc, IEE_CB_CMD_CONF);
	case IEE_CB_CMD_TR:	/* Transmit */
		cb->cb_transmit.tx_tbd_addr =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_tbd_off
		    + sc->sc_tbd_sz * sc->sc_next_tbd));
		cb->cb_cmd |= IEE_CB_SF;	/* Always use Flexible Mode. */
	case IEE_CB_CMD_TDR:	/* Time Domain Reflectometry */
	case IEE_CB_CMD_DUMP:	/* Dump */
	case IEE_CB_CMD_DIAG:	/* Diagnose */
	cb->cb_link_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_cb_off +
	    sc->sc_cb_sz * (sc->sc_next_cb + 1)));
	IEE_CBSYNC(sc, sc->sc_next_cb,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
iee_attach(struct iee_softc *sc, uint8_t *eth_addr, int *media, int nmedia,
    int defmedia)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	KASSERT(sc->sc_cl_align > 0 && powerof2(sc->sc_cl_align));

	/*
	 * Calculate DMA descriptor offsets and sizes in shmem
	 * which should be cache line aligned.
	 */
	sc->sc_scp_sz = roundup2(sizeof(struct iee_scp), sc->sc_cl_align);
	sc->sc_iscp_off = sc->sc_scp_sz;
	sc->sc_iscp_sz = roundup2(sizeof(struct iee_iscp), sc->sc_cl_align);
	sc->sc_scb_off = sc->sc_iscp_off + sc->sc_iscp_sz;
	sc->sc_scb_sz = roundup2(sizeof(struct iee_scb), sc->sc_cl_align);
	sc->sc_rfd_off = sc->sc_scb_off + sc->sc_scb_sz;
	sc->sc_rfd_sz = roundup2(sizeof(struct iee_rfd), sc->sc_cl_align);
	sc->sc_rbd_off = sc->sc_rfd_off + sc->sc_rfd_sz * IEE_NRFD;
	sc->sc_rbd_sz = roundup2(sizeof(struct iee_rbd), sc->sc_cl_align);
	sc->sc_cb_off = sc->sc_rbd_off + sc->sc_rbd_sz * IEE_NRFD;
	sc->sc_cb_sz = roundup2(sizeof(struct iee_cb), sc->sc_cl_align);
	sc->sc_tbd_off = sc->sc_cb_off + sc->sc_cb_sz * IEE_NCB;
	sc->sc_tbd_sz = roundup2(sizeof(struct iee_tbd), sc->sc_cl_align);
	sc->sc_shmem_sz = sc->sc_tbd_off + sc->sc_tbd_sz * IEE_NTBD * IEE_NCB;
	/* allocate memory for shared DMA descriptors */
	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_shmem_sz, PAGE_SIZE, 0,
	    &sc->sc_dma_segs, 1, &sc->sc_dma_rsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error(": can't allocate %d bytes of DMA memory\n",
		    sc->sc_shmem_sz);
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_segs, sc->sc_dma_rsegs,
	    sc->sc_shmem_sz, (void **)&sc->sc_shmem_addr,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT) != 0) {
		aprint_error(": can't map DMA memory\n");
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
		    sc->sc_dma_rsegs);
	if (bus_dmamap_create(sc->sc_dmat, sc->sc_shmem_sz, sc->sc_dma_rsegs,
	    sc->sc_shmem_sz, 0, BUS_DMA_NOWAIT, &sc->sc_shmem_map) != 0) {
		aprint_error(": can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr,
		    sc->sc_shmem_sz);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
		    sc->sc_dma_rsegs);
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_shmem_map, sc->sc_shmem_addr,
	    sc->sc_shmem_sz, NULL, BUS_DMA_NOWAIT) != 0) {
		aprint_error(": can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_shmem_map);
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr,
		    sc->sc_shmem_sz);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
		    sc->sc_dma_rsegs);
	memset(sc->sc_shmem_addr, 0, sc->sc_shmem_sz);
	/* Set pointer to Intermediate System Configuration Pointer. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_SCP(sc)->scp_iscp_addr = IEE_SWAP32(IEE_PHYS_SHMEM(sc->sc_iscp_off));
	SC_SCP(sc)->scp_sysbus = sc->sc_sysbus;
	/* Set pointer to System Control Block. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_ISCP(sc)->iscp_scb_addr = IEE_SWAP32(IEE_PHYS_SHMEM(sc->sc_scb_off));
	/* Set pointer to Receive Frame Area. (physical address) */
	SC_SCB(sc)->scb_rfa_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off));
	/* Set pointer to Command Block. (physical address) */
	SC_SCB(sc)->scb_cmd_blk_addr =
	    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_cb_off));

	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, sc->sc_shmem_sz,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
	for (n = 0 ; n < nmedia ; n++)
		ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, defmedia);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = iee_start;		/* initiate output routine */
	ifp->if_ioctl = iee_ioctl;		/* ioctl routine */
	ifp->if_init = iee_init;		/* init routine */
	ifp->if_stop = iee_stop;		/* stop routine */
	ifp->if_watchdog = iee_watchdog;	/* timer routine */
	IFQ_SET_READY(&ifp->if_snd);
	/* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	ether_ifattach(ifp, eth_addr);

	aprint_normal(": Intel 82596%s address %s\n",
	    i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));
	for (n = 0 ; n < IEE_NCB ; n++)
		sc->sc_tx_map[n] = NULL;
	for (n = 0 ; n < IEE_NRFD ; n++) {
		sc->sc_rx_mbuf[n] = NULL;
		sc->sc_rx_map[n] = NULL;
	}
	sc->sc_tx_timeout = 0;
	sc->sc_setup_timeout = 0;
	(sc->sc_iee_reset)(sc);
iee_detach(struct iee_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		iee_stop(ifp, 1);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_shmem_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_shmem_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr, sc->sc_shmem_sz);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs, sc->sc_dma_rsegs);
/* media change and status callback */
iee_mediachange(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediachange != NULL)
		return (sc->sc_mediachange)(ifp);

iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediastatus != NULL)
		(sc->sc_mediastatus)(ifp, ifmreq);
/* initiate output routine */
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueuing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBD than we support.
			 * Copy the packet into an mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    device_xname(sc->sc_dev));
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
			MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			tbd = SC_TBD(sc, sc->sc_next_tbd + n);
			tbd->tbd_tb_addr =
			    IEE_SWAPA32(sc->sc_tx_map[t]->dm_segs[n].ds_addr);
			tbd->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			tbd->tbd_link_addr =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_tbd_off +
			    sc->sc_tbd_sz * (sc->sc_next_tbd + n + 1)));
		}
		SC_TBD(sc, sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
		    sc->sc_tbd_off + sc->sc_next_tbd * sc->sc_tbd_sz,
		    sc->sc_tbd_sz * sc->sc_tx_map[t]->dm_nsegs,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
#if NBPFILTER > 0
		/* Pass packet to bpf if someone listens. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, sc->sc_tx_mbuf[t]);
#endif /* NBPFILTER > 0 */
	}
	if (t == 0)
		/* No packets got set up for TX. */
		return;
	ifp->if_flags |= IFF_OACTIVE;
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
iee_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iee_softc *sc = ifp->if_softc;

		err = ifmedia_ioctl(ifp, (struct ifreq *) data,
		    &sc->sc_ifmedia, cmd);
		err = ether_ioctl(ifp, cmd, data);
		if (err == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING) {
				iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S |
				    IEE_CB_EL | IEE_CB_I);
				if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
					(*sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
iee_init(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	sc->sc_flags &= ~IEE_WANT_MCAST;
	SC_SCB(sc)->scb_crc_err = 0;
	SC_SCB(sc)->scb_align_err = 0;
	SC_SCB(sc)->scb_resource_err = 0;
	SC_SCB(sc)->scb_overrun_err = 0;
	SC_SCB(sc)->scb_rcvcdt_err = 0;
	SC_SCB(sc)->scb_short_fr_err = 0;
	sc->sc_align_err = 0;
	sc->sc_resource_err = 0;
	sc->sc_overrun_err = 0;
	sc->sc_rcvcdt_err = 0;
	sc->sc_short_fr_err = 0;
	/* Create Transmit DMA maps. */
	for (t = 0 ; t < IEE_NCB ; t++) {
		if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_map[t]) != 0) {
			printf("%s: iee_init: can't create TX DMA map\n",
			    device_xname(sc->sc_dev));
			for (n = 0 ; n < t ; n++)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_tx_map[n]);
	/* Initialize Receive Frame and Receive Buffer Descriptors */
	memset(SC_RFD(sc, 0), 0, sc->sc_rfd_sz * IEE_NRFD);
	memset(SC_RBD(sc, 0), 0, sc->sc_rbd_sz * IEE_NRFD);
	for (r = 0 ; r < IEE_NRFD ; r++) {
		SC_RFD(sc, r)->rfd_cmd = IEE_RFD_SF;
		SC_RFD(sc, r)->rfd_link_addr =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off
		    + sc->sc_rfd_sz * ((r + 1) % IEE_NRFD)));
		SC_RBD(sc, r)->rbd_next_rbd =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off
		    + sc->sc_rbd_sz * ((r + 1) % IEE_NRFD)));
		if (sc->sc_rx_mbuf[r] == NULL) {
			MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
			if (sc->sc_rx_mbuf[r] == NULL) {
				printf("%s: iee_init: can't allocate mbuf\n",
				    device_xname(sc->sc_dev));
			MCLAIM(sc->sc_rx_mbuf[r], &sc->sc_ethercom.ec_rx_mowner);
			MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
			if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
				printf("%s: iee_init: can't allocate mbuf"
				    " cluster\n", device_xname(sc->sc_dev));
				m_freem(sc->sc_rx_mbuf[r]);
			sc->sc_rx_mbuf[r]->m_len =
			    sc->sc_rx_mbuf[r]->m_pkthdr.len = MCLBYTES - 2;
			sc->sc_rx_mbuf[r]->m_data += 2;
		if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_map[r]) != 0) {
			printf("%s: iee_init: can't create RX "
			    "DMA map\n", device_xname(sc->sc_dev));
			m_freem(sc->sc_rx_mbuf[r]);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_map[r],
		    sc->sc_rx_mbuf[r], BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
			printf("%s: iee_init: can't load RX DMA map\n",
			    device_xname(sc->sc_dev));
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
			m_freem(sc->sc_rx_mbuf[r]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
		    sc->sc_rx_map[r]->dm_mapsize, BUS_DMASYNC_PREREAD);
		SC_RBD(sc, r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
		SC_RBD(sc, r)->rbd_rb_addr =
		    IEE_SWAPA32(sc->sc_rx_map[r]->dm_segs[0].ds_addr);
	}
	SC_RFD(sc, 0)->rfd_rbd_addr =
	    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off));
		for (n = 0 ; n < r ; n++) {
			m_freem(sc->sc_rx_mbuf[n]);
			sc->sc_rx_mbuf[n] = NULL;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
			sc->sc_rx_map[n] = NULL;
		}
		for (n = 0 ; n < t ; n++) {
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
			sc->sc_tx_map[n] = NULL;
	(sc->sc_iee_reset)(sc);
	iee_cb_setup(sc, IEE_CB_CMD_IAS);
	sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
	sc->sc_cf[1] = IEE_CF_1_DEF;
	sc->sc_cf[2] = IEE_CF_2_DEF;
	sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
	    | IEE_CF_3_PREAMLEN_DEF;
	sc->sc_cf[4] = IEE_CF_4_DEF;
	sc->sc_cf[5] = IEE_CF_5_DEF;
	sc->sc_cf[6] = IEE_CF_6_DEF;
	sc->sc_cf[7] = IEE_CF_7_DEF;
	sc->sc_cf[8] = IEE_CF_8_DEF;
	sc->sc_cf[9] = IEE_CF_9_DEF;
	sc->sc_cf[10] = IEE_CF_10_DEF;
	sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
	sc->sc_cf[12] = IEE_CF_12_DEF;
	sc->sc_cf[13] = IEE_CF_13_DEF;
	iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
	SC_SCB(sc)->scb_rfa_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, sc->sc_shmem_sz,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);

	/* Mark the interface as running and ready to RX/TX packets. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
iee_stop(struct ifnet *ifp, int disable)
{
	struct iee_softc *sc = ifp->if_softc;

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_flags |= IFF_OACTIVE;
	/* Reset the chip to get it quiet. */
	(sc->sc_iee_reset)(ifp->if_softc);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(ifp->if_softc, IEE_SCB_ACK);
	/* Release any dynamically allocated resources. */
	for (n = 0 ; n < IEE_NCB ; n++) {
		if (sc->sc_tx_map[n] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
		sc->sc_tx_map[n] = NULL;
	}
	for (n = 0 ; n < IEE_NRFD ; n++) {
		if (sc->sc_rx_mbuf[n] != NULL)
			m_freem(sc->sc_rx_mbuf[n]);
		sc->sc_rx_mbuf[n] = NULL;
		if (sc->sc_rx_map[n] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
		}
		sc->sc_rx_map[n] = NULL;
iee_watchdog(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	(sc->sc_iee_reset)(sc);
	if (sc->sc_next_tbd != 0)
		printf("%s: iee_watchdog: transmit timeout %d\n",
		    device_xname(sc->sc_dev), ++sc->sc_tx_timeout);
	else
		printf("%s: iee_watchdog: setup timeout %d\n",
		    device_xname(sc->sc_dev), ++sc->sc_setup_timeout);