1 /* $NetBSD: sgec.c,v 1.35 2008/03/11 05:34:01 matt Exp $ */
3 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed at Ludd, University of
16 * Luleå, Sweden and its contributors.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 * Driver for the SGEC (Second Generation Ethernet Controller), sitting
34 * on for example the VAX 4000/300 (KA670).
36 * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
38 * Even though the chip is capable of using virtual addresses (read the
39 * System Page Table directly) this driver doesn't do so, and there
40 * is no benefit in doing it either in NetBSD of today.
42 * Things that are still to do:
44 * Use imperfect filtering when many multicast addresses.
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.35 2008/03/11 05:34:01 matt Exp $");
53 #include <sys/param.h>
55 #include <sys/socket.h>
56 #include <sys/device.h>
57 #include <sys/systm.h>
58 #include <sys/sockio.h>
60 #include <uvm/uvm_extern.h>
63 #include <net/if_ether.h>
64 #include <net/if_dl.h>
66 #include <netinet/in.h>
67 #include <netinet/if_inarp.h>
71 #include <net/bpfdesc.h>
76 #include <dev/ic/sgecreg.h>
77 #include <dev/ic/sgecvar.h>
79 static void zeinit(struct ze_softc
*);
80 static void zestart(struct ifnet
*);
81 static int zeioctl(struct ifnet
*, u_long
, void *);
82 static int ze_add_rxbuf(struct ze_softc
*, int);
83 static void ze_setup(struct ze_softc
*);
84 static void zetimeout(struct ifnet
*);
85 static bool zereset(struct ze_softc
*);
87 #define ZE_WCSR(csr, val) \
88 bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
89 #define ZE_RCSR(csr) \
90 bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)
93 * Interface exists: make available by filling in network interface
94 * record. System will initialize the interface when it is ready
98 sgec_attach(struct ze_softc
*sc
)
100 struct ifnet
*ifp
= &sc
->sc_if
;
103 bus_dma_segment_t seg
;
107 * Allocate DMA safe memory for descriptors and setup memory.
109 error
= bus_dmamem_alloc(sc
->sc_dmat
, sizeof(struct ze_cdata
),
110 PAGE_SIZE
, 0, &seg
, 1, &rseg
, BUS_DMA_NOWAIT
);
112 aprint_error(": unable to allocate control data, error = %d\n",
117 error
= bus_dmamem_map(sc
->sc_dmat
, &seg
, rseg
, sizeof(struct ze_cdata
),
118 (void **)&sc
->sc_zedata
, BUS_DMA_NOWAIT
|BUS_DMA_COHERENT
);
121 ": unable to map control data, error = %d\n", error
);
125 error
= bus_dmamap_create(sc
->sc_dmat
, sizeof(struct ze_cdata
), 1,
126 sizeof(struct ze_cdata
), 0, BUS_DMA_NOWAIT
, &sc
->sc_cmap
);
129 ": unable to create control data DMA map, error = %d\n",
134 error
= bus_dmamap_load(sc
->sc_dmat
, sc
->sc_cmap
, sc
->sc_zedata
,
135 sizeof(struct ze_cdata
), NULL
, BUS_DMA_NOWAIT
);
138 ": unable to load control data DMA map, error = %d\n",
144 * Zero the newly allocated memory.
146 memset(sc
->sc_zedata
, 0, sizeof(struct ze_cdata
));
149 * Create the transmit descriptor DMA maps.
151 for (i
= 0; error
== 0 && i
< TXDESCS
; i
++) {
152 error
= bus_dmamap_create(sc
->sc_dmat
, MCLBYTES
,
153 TXDESCS
- 1, MCLBYTES
, 0, BUS_DMA_NOWAIT
|BUS_DMA_ALLOCNOW
,
157 aprint_error(": unable to create tx DMA map %d, error = %d\n",
163 * Create receive buffer DMA maps.
165 for (i
= 0; error
== 0 && i
< RXDESCS
; i
++) {
166 error
= bus_dmamap_create(sc
->sc_dmat
, MCLBYTES
, 1,
167 MCLBYTES
, 0, BUS_DMA_NOWAIT
, &sc
->sc_rcvmap
[i
]);
170 aprint_error(": unable to create rx DMA map %d, error = %d\n",
176 * Pre-allocate the receive buffers.
178 for (i
= 0; error
== 0 && i
< RXDESCS
; i
++) {
179 error
= ze_add_rxbuf(sc
, i
);
184 ": unable to allocate or map rx buffer %d, error = %d\n",
191 evcnt_attach_dynamic(&sc
->sc_intrcnt
, EVCNT_TYPE_INTR
, NULL
,
192 device_xname(sc
->sc_dev
), "intr");
193 evcnt_attach_dynamic(&sc
->sc_rxintrcnt
, EVCNT_TYPE_INTR
,
194 &sc
->sc_intrcnt
, device_xname(sc
->sc_dev
), "rx intr");
195 evcnt_attach_dynamic(&sc
->sc_txintrcnt
, EVCNT_TYPE_INTR
,
196 &sc
->sc_intrcnt
, device_xname(sc
->sc_dev
), "tx intr");
197 evcnt_attach_dynamic(&sc
->sc_txdraincnt
, EVCNT_TYPE_INTR
,
198 &sc
->sc_intrcnt
, device_xname(sc
->sc_dev
), "tx drain");
199 evcnt_attach_dynamic(&sc
->sc_nobufintrcnt
, EVCNT_TYPE_INTR
,
200 &sc
->sc_intrcnt
, device_xname(sc
->sc_dev
), "nobuf intr");
201 evcnt_attach_dynamic(&sc
->sc_nointrcnt
, EVCNT_TYPE_INTR
,
202 &sc
->sc_intrcnt
, device_xname(sc
->sc_dev
), "no intr");
205 * Create ring loops of the buffer chains.
206 * This is only done once.
208 sc
->sc_pzedata
= (struct ze_cdata
*)sc
->sc_cmap
->dm_segs
[0].ds_addr
;
210 rp
= sc
->sc_zedata
->zc_recv
;
211 rp
[RXDESCS
].ze_framelen
= ZE_FRAMELEN_OW
;
212 rp
[RXDESCS
].ze_rdes1
= ZE_RDES1_CA
;
213 rp
[RXDESCS
].ze_bufaddr
= (char *)sc
->sc_pzedata
->zc_recv
;
215 tp
= sc
->sc_zedata
->zc_xmit
;
216 tp
[TXDESCS
].ze_tdr
= ZE_TDR_OW
;
217 tp
[TXDESCS
].ze_tdes1
= ZE_TDES1_CA
;
218 tp
[TXDESCS
].ze_bufaddr
= (char *)sc
->sc_pzedata
->zc_xmit
;
223 strcpy(ifp
->if_xname
, device_xname(sc
->sc_dev
));
225 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
226 ifp
->if_start
= zestart
;
227 ifp
->if_ioctl
= zeioctl
;
228 ifp
->if_watchdog
= zetimeout
;
229 IFQ_SET_READY(&ifp
->if_snd
);
232 * Attach the interface.
235 ether_ifattach(ifp
, sc
->sc_enaddr
);
238 aprint_normal_dev(sc
->sc_dev
, "hardware address %s\n",
239 ether_sprintf(sc
->sc_enaddr
));
243 * Free any resources we've allocated during the failed attach
244 * attempt. Do this in reverse order and fall through.
247 for (i
= 0; i
< RXDESCS
; i
++) {
248 if (sc
->sc_rxmbuf
[i
] != NULL
) {
249 bus_dmamap_unload(sc
->sc_dmat
, sc
->sc_xmtmap
[i
]);
250 m_freem(sc
->sc_rxmbuf
[i
]);
254 for (i
= 0; i
< RXDESCS
; i
++) {
255 if (sc
->sc_xmtmap
[i
] != NULL
)
256 bus_dmamap_destroy(sc
->sc_dmat
, sc
->sc_xmtmap
[i
]);
259 for (i
= 0; i
< TXDESCS
; i
++) {
260 if (sc
->sc_rcvmap
[i
] != NULL
)
261 bus_dmamap_destroy(sc
->sc_dmat
, sc
->sc_rcvmap
[i
]);
263 bus_dmamap_unload(sc
->sc_dmat
, sc
->sc_cmap
);
265 bus_dmamap_destroy(sc
->sc_dmat
, sc
->sc_cmap
);
267 bus_dmamem_unmap(sc
->sc_dmat
, (void *)sc
->sc_zedata
,
268 sizeof(struct ze_cdata
));
270 bus_dmamem_free(sc
->sc_dmat
, &seg
, rseg
);
276 * Initialization of interface.
279 zeinit(struct ze_softc
*sc
)
281 struct ifnet
*ifp
= &sc
->sc_if
;
282 struct ze_cdata
*zc
= sc
->sc_zedata
;
286 * Reset the interface.
291 sc
->sc_nexttx
= sc
->sc_inq
= sc
->sc_lastack
= sc
->sc_txcnt
= 0;
293 * Release and init transmit descriptors.
295 for (i
= 0; i
< TXDESCS
; i
++) {
296 if (sc
->sc_xmtmap
[i
]->dm_nsegs
> 0)
297 bus_dmamap_unload(sc
->sc_dmat
, sc
->sc_xmtmap
[i
]);
298 if (sc
->sc_txmbuf
[i
]) {
299 m_freem(sc
->sc_txmbuf
[i
]);
300 sc
->sc_txmbuf
[i
] = 0;
302 zc
->zc_xmit
[i
].ze_tdr
= 0; /* Clear valid bit */
307 * Init receive descriptors.
309 for (i
= 0; i
< RXDESCS
; i
++)
310 zc
->zc_recv
[i
].ze_framelen
= ZE_FRAMELEN_OW
;
313 ZE_WCSR(ZE_CSR6
, ZE_NICSR6_IE
|ZE_NICSR6_BL_8
|ZE_NICSR6_ST
|
314 ZE_NICSR6_SR
|ZE_NICSR6_DC
);
316 ifp
->if_flags
|= IFF_RUNNING
;
317 ifp
->if_flags
&= ~IFF_OACTIVE
;
320 * Send a setup frame.
321 * This will start the transmit machinery as well.
328 * Start output on interface.
331 zestart(struct ifnet
*ifp
)
333 struct ze_softc
*sc
= ifp
->if_softc
;
334 struct ze_cdata
*zc
= sc
->sc_zedata
;
338 int len
, i
, totlen
, error
;
339 int old_inq
= sc
->sc_inq
;
340 uint16_t orword
, tdr
;
343 while (sc
->sc_inq
< (TXDESCS
- 1)) {
349 nexttx
= sc
->sc_nexttx
;
350 IFQ_POLL(&sc
->sc_if
.if_snd
, m
);
354 * Count number of mbufs in chain.
355 * Always do DMA directly from mbufs, therefore the transmit
356 * ring is really big.
358 map
= sc
->sc_xmtmap
[nexttx
];
359 error
= bus_dmamap_load_mbuf(sc
->sc_dmat
, map
, m
,
362 aprint_error_dev(sc
->sc_dev
,
363 "zestart: load_mbuf failed: %d", error
);
367 if (map
->dm_nsegs
>= TXDESCS
)
368 panic("zestart"); /* XXX */
370 if ((map
->dm_nsegs
+ sc
->sc_inq
) >= (TXDESCS
- 1)) {
371 bus_dmamap_unload(sc
->sc_dmat
, map
);
372 ifp
->if_flags
|= IFF_OACTIVE
;
377 * m now points to a mbuf chain that can be loaded.
378 * Loop around and set it.
381 orword
= ZE_TDES1_FS
;
383 for (i
= 0; i
< map
->dm_nsegs
; i
++) {
384 buffer
= map
->dm_segs
[i
].ds_addr
;
385 len
= map
->dm_segs
[i
].ds_len
;
390 /* Word alignment calc */
391 if (totlen
== m
->m_pkthdr
.len
) {
392 sc
->sc_txcnt
+= map
->dm_nsegs
;
393 if (sc
->sc_txcnt
>= TXDESCS
* 3 / 4) {
394 orword
|= ZE_TDES1_IC
;
397 orword
|= ZE_TDES1_LS
;
398 sc
->sc_txmbuf
[nexttx
] = m
;
400 zc
->zc_xmit
[nexttx
].ze_bufsize
= len
;
401 zc
->zc_xmit
[nexttx
].ze_bufaddr
= (char *)buffer
;
402 zc
->zc_xmit
[nexttx
].ze_tdes1
= orword
;
403 zc
->zc_xmit
[nexttx
].ze_tdr
= tdr
;
405 if (++nexttx
== TXDESCS
)
411 sc
->sc_inq
+= map
->dm_nsegs
;
413 IFQ_DEQUEUE(&ifp
->if_snd
, m
);
415 if (totlen
!= m
->m_pkthdr
.len
)
416 panic("zestart: len fault");
419 * Turn ownership of the packet over to the device.
421 zc
->zc_xmit
[starttx
].ze_tdr
= ZE_TDR_OW
;
424 * Kick off the transmit logic, if it is stopped.
426 if ((ZE_RCSR(ZE_CSR5
) & ZE_NICSR5_TS
) != ZE_NICSR5_TS_RUN
)
427 ZE_WCSR(ZE_CSR1
, -1);
428 sc
->sc_nexttx
= nexttx
;
430 if (sc
->sc_inq
== (TXDESCS
- 1))
431 ifp
->if_flags
|= IFF_OACTIVE
;
433 out
: if (old_inq
< sc
->sc_inq
)
434 ifp
->if_timer
= 5; /* If transmit logic dies */
438 sgec_intr(struct ze_softc
*sc
)
440 struct ze_cdata
*zc
= sc
->sc_zedata
;
441 struct ifnet
*ifp
= &sc
->sc_if
;
445 csr
= ZE_RCSR(ZE_CSR5
);
446 if ((csr
& ZE_NICSR5_IS
) == 0) { /* Wasn't we */
447 sc
->sc_nointrcnt
.ev_count
++;
450 ZE_WCSR(ZE_CSR5
, csr
);
452 if (csr
& ZE_NICSR5_RU
)
453 sc
->sc_nobufintrcnt
.ev_count
++;
455 if (csr
& ZE_NICSR5_RI
) {
456 sc
->sc_rxintrcnt
.ev_count
++;
457 while ((zc
->zc_recv
[sc
->sc_nextrx
].ze_framelen
&
458 ZE_FRAMELEN_OW
) == 0) {
461 m
= sc
->sc_rxmbuf
[sc
->sc_nextrx
];
462 len
= zc
->zc_recv
[sc
->sc_nextrx
].ze_framelen
;
463 ze_add_rxbuf(sc
, sc
->sc_nextrx
);
464 if (++sc
->sc_nextrx
== RXDESCS
)
466 if (len
< ETHER_MIN_LEN
) {
470 m
->m_pkthdr
.rcvif
= ifp
;
471 m
->m_pkthdr
.len
= m
->m_len
=
475 bpf_mtap(ifp
->if_bpf
, m
);
477 (*ifp
->if_input
)(ifp
, m
);
482 if (csr
& ZE_NICSR5_TI
)
483 sc
->sc_txintrcnt
.ev_count
++;
484 if (sc
->sc_lastack
!= sc
->sc_nexttx
) {
486 for (lastack
= sc
->sc_lastack
; lastack
!= sc
->sc_nexttx
; ) {
490 if ((zc
->zc_xmit
[lastack
].ze_tdr
& ZE_TDR_OW
) != 0)
493 if ((zc
->zc_xmit
[lastack
].ze_tdes1
& ZE_TDES1_DT
) ==
495 if (++lastack
== TXDESCS
)
501 KASSERT(zc
->zc_xmit
[lastack
].ze_tdes1
& ZE_TDES1_FS
);
502 map
= sc
->sc_xmtmap
[lastack
];
503 KASSERT(map
->dm_nsegs
> 0);
504 nlastack
= (lastack
+ map
->dm_nsegs
- 1) % TXDESCS
;
505 if (zc
->zc_xmit
[nlastack
].ze_tdr
& ZE_TDR_OW
)
508 if (sc
->sc_txcnt
> map
->dm_nsegs
)
509 sc
->sc_txcnt
-= map
->dm_nsegs
;
512 sc
->sc_inq
-= map
->dm_nsegs
;
513 KASSERT(zc
->zc_xmit
[lastack
].ze_tdes1
& ZE_TDES1_LS
);
515 bus_dmamap_unload(sc
->sc_dmat
, map
);
516 KASSERT(sc
->sc_txmbuf
[lastack
]);
519 bpf_mtap(ifp
->if_bpf
, sc
->sc_txmbuf
[lastack
]);
521 m_freem(sc
->sc_txmbuf
[lastack
]);
522 sc
->sc_txmbuf
[lastack
] = 0;
523 if (++lastack
== TXDESCS
)
526 if (lastack
!= sc
->sc_lastack
) {
527 sc
->sc_txdraincnt
.ev_count
++;
528 sc
->sc_lastack
= lastack
;
531 ifp
->if_flags
&= ~IFF_OACTIVE
;
532 zestart(ifp
); /* Put in more in queue */
539 * Process an ioctl request.
542 zeioctl(struct ifnet
*ifp
, u_long cmd
, void *data
)
544 struct ze_softc
*sc
= ifp
->if_softc
;
545 struct ifaddr
*ifa
= data
;
546 int s
= splnet(), error
= 0;
551 ifp
->if_flags
|= IFF_UP
;
552 switch(ifa
->ifa_addr
->sa_family
) {
556 arp_ifinit(ifp
, ifa
);
563 if ((error
= ifioctl_common(ifp
, cmd
, data
)) != 0)
565 /* XXX re-use ether_ioctl() */
566 switch (ifp
->if_flags
& (IFF_UP
|IFF_RUNNING
)) {
569 * If interface is marked down and it is running,
570 * stop it. (by disabling receive mechanism).
572 ZE_WCSR(ZE_CSR6
, ZE_RCSR(ZE_CSR6
) &
573 ~(ZE_NICSR6_ST
|ZE_NICSR6_SR
));
574 ifp
->if_flags
&= ~IFF_RUNNING
;
578 * If interface it marked up and it is stopped, then
583 case IFF_UP
|IFF_RUNNING
:
585 * Send a new setup packet to match any new changes.
586 * (Like IFF_PROMISC etc)
598 * Update our multicast list.
600 if ((error
= ether_ioctl(ifp
, cmd
, data
)) == ENETRESET
) {
602 * Multicast list has changed; set the hardware filter
605 if (ifp
->if_flags
& IFF_RUNNING
)
612 error
= ether_ioctl(ifp
, cmd
, data
);
620 * Add a receive buffer to the indicated descriptor.
623 ze_add_rxbuf(struct ze_softc
*sc
, int i
)
629 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
633 MCLAIM(m
, &sc
->sc_ec
.ec_rx_mowner
);
634 MCLGET(m
, M_DONTWAIT
);
635 if ((m
->m_flags
& M_EXT
) == 0) {
640 if (sc
->sc_rxmbuf
[i
] != NULL
)
641 bus_dmamap_unload(sc
->sc_dmat
, sc
->sc_rcvmap
[i
]);
643 error
= bus_dmamap_load(sc
->sc_dmat
, sc
->sc_rcvmap
[i
],
644 m
->m_ext
.ext_buf
, m
->m_ext
.ext_size
, NULL
,
645 BUS_DMA_READ
|BUS_DMA_NOWAIT
);
647 panic("%s: can't load rx DMA map %d, error = %d",
648 device_xname(sc
->sc_dev
), i
, error
);
649 sc
->sc_rxmbuf
[i
] = m
;
651 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_rcvmap
[i
], 0,
652 sc
->sc_rcvmap
[i
]->dm_mapsize
, BUS_DMASYNC_PREREAD
);
655 * We know that the mbuf cluster is page aligned. Also, be sure
656 * that the IP header will be longword aligned.
659 rp
= &sc
->sc_zedata
->zc_recv
[i
];
660 rp
->ze_bufsize
= (m
->m_ext
.ext_size
- 2);
661 rp
->ze_bufaddr
= (char *)sc
->sc_rcvmap
[i
]->dm_segs
[0].ds_addr
+ 2;
662 rp
->ze_framelen
= ZE_FRAMELEN_OW
;
668 * Create a setup packet and put in queue for sending.
671 ze_setup(struct ze_softc
*sc
)
673 struct ether_multi
*enm
;
674 struct ether_multistep step
;
675 struct ze_cdata
*zc
= sc
->sc_zedata
;
676 struct ifnet
*ifp
= &sc
->sc_if
;
677 const u_int8_t
*enaddr
= CLLADDR(ifp
->if_sadl
);
680 if (sc
->sc_inq
== (TXDESCS
- 1)) {
686 * Init the setup packet with valid info.
688 memset(zc
->zc_setup
, 0xff, sizeof(zc
->zc_setup
)); /* Broadcast */
689 memcpy(zc
->zc_setup
, enaddr
, ETHER_ADDR_LEN
);
692 * Multicast handling. The SGEC can handle up to 16 direct
693 * ethernet addresses.
696 ifp
->if_flags
&= ~IFF_ALLMULTI
;
697 ETHER_FIRST_MULTI(step
, &sc
->sc_ec
, enm
);
698 while (enm
!= NULL
) {
699 if (memcmp(enm
->enm_addrlo
, enm
->enm_addrhi
, 6)) {
700 ifp
->if_flags
|= IFF_ALLMULTI
;
703 memcpy(&zc
->zc_setup
[j
], enm
->enm_addrlo
, ETHER_ADDR_LEN
);
705 ETHER_NEXT_MULTI(step
, enm
);
706 if ((enm
!= NULL
)&& (j
== 128)) {
707 ifp
->if_flags
|= IFF_ALLMULTI
;
713 * ALLMULTI implies PROMISC in this driver.
715 if (ifp
->if_flags
& IFF_ALLMULTI
)
716 ifp
->if_flags
|= IFF_PROMISC
;
717 else if (ifp
->if_pcount
== 0)
718 ifp
->if_flags
&= ~IFF_PROMISC
;
721 * Fiddle with the receive logic.
723 reg
= ZE_RCSR(ZE_CSR6
);
725 ZE_WCSR(ZE_CSR6
, reg
& ~ZE_NICSR6_SR
); /* Stop rx */
726 reg
&= ~ZE_NICSR6_AF
;
727 if (ifp
->if_flags
& IFF_PROMISC
)
728 reg
|= ZE_NICSR6_AF_PROM
;
729 else if (ifp
->if_flags
& IFF_ALLMULTI
)
730 reg
|= ZE_NICSR6_AF_ALLM
;
732 ZE_WCSR(ZE_CSR6
, reg
);
734 * Only send a setup packet if needed.
736 if ((ifp
->if_flags
& (IFF_PROMISC
|IFF_ALLMULTI
)) == 0) {
738 zc
->zc_xmit
[idx
].ze_tdes1
= ZE_TDES1_DT_SETUP
;
739 zc
->zc_xmit
[idx
].ze_bufsize
= 128;
740 zc
->zc_xmit
[idx
].ze_bufaddr
= sc
->sc_pzedata
->zc_setup
;
741 zc
->zc_xmit
[idx
].ze_tdr
= ZE_TDR_OW
;
743 if ((ZE_RCSR(ZE_CSR5
) & ZE_NICSR5_TS
) != ZE_NICSR5_TS_RUN
)
744 ZE_WCSR(ZE_CSR1
, -1);
747 if (++sc
->sc_nexttx
== TXDESCS
)
753 * Check for dead transmit logic.
756 zetimeout(struct ifnet
*ifp
)
758 struct ze_softc
*sc
= ifp
->if_softc
;
763 aprint_error_dev(sc
->sc_dev
, "xmit logic died, resetting...\n");
765 * Do a reset of interface, to get it going again.
766 * Will it work by just restart the transmit logic?
773 * Set/reset the reset flag.
774 * Write interrupt vector.
775 * Write ring buffer addresses.
779 zereset(struct ze_softc
*sc
)
783 ZE_WCSR(ZE_CSR6
, ZE_NICSR6_RE
);
785 if (ZE_RCSR(ZE_CSR6
) & ZE_NICSR5_SF
) {
786 aprint_error_dev(sc
->sc_dev
, "selftest failed\n");
791 * Get the vector that were set at match time, and remember it.
792 * WHICH VECTOR TO USE? Take one unused. XXX
793 * Funny way to set vector described in the programmers manual.
795 reg
= ZE_NICSR0_IPL14
| sc
->sc_intvec
| 0x1fff0003; /* SYNC/ASYNC??? */
799 aprint_error_dev(sc
->sc_dev
,
800 "failing SGEC CSR0 init\n");
803 ZE_WCSR(ZE_CSR0
, reg
);
804 } while (ZE_RCSR(ZE_CSR0
) != reg
);
806 ZE_WCSR(ZE_CSR3
, (vaddr_t
)sc
->sc_pzedata
->zc_recv
);
807 ZE_WCSR(ZE_CSR4
, (vaddr_t
)sc
->sc_pzedata
->zc_xmit
);