/*      $NetBSD: if_qe.c,v 1.68 2008/11/07 00:20:12 dyoung Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Lule}, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that remain to be done:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.68 2008/11/07 00:20:12 dyoung Exp $");

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>
#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # of transmit descriptors */
/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};
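/*
 * The 128-byte setup "packet" is not transmitted on the wire; the device
 * interprets it to load its station address filter.  See qe_setup() below
 * for the column-wise byte layout it must be given.
 */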
struct qe_softc {
	device_t	sc_dev;		/* Configuration common part */
	struct uba_softc *sc_uh;	/* our parent */
	struct evcnt	sc_intrcnt;	/* Interrupt counting */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct ubinfo	sc_ui;		/* DMA-safe descriptor memory */
	struct qe_cdata	*sc_qedata;	/* Descriptor struct */
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above */
	struct mbuf	*sc_txmbuf[TXDESCS];
	struct mbuf	*sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer */
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;	/* next free transmit descriptor */
	int		sc_inq;		/* # of descriptors in transmit queue */
	int		sc_lastack;	/* last transmit descriptor acked */
	int		sc_nextrx;	/* next receive descriptor to poll */
	int		sc_setup;	/* Setup packet in queue */
};
static	int	qematch(device_t, cfdata_t, void *);
static	void	qeattach(device_t, device_t, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, void *);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);
#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)
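/*
 * A Q22-bus DMA address is 22 bits wide: LOWORD carries the low 16 bits
 * and HIWORD the remaining 6, which share the descriptor's address-high
 * word with flag bits such as QE_VALID and QE_SETUP.
 */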
#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
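/*
 * ETHER_PAD_LEN is the shortest frame the driver itself must supply: the
 * chip appends the 4-byte CRC, so padding runts to
 * ETHER_MIN_LEN - ETHER_CRC_LEN (60) bytes yields a legal minimum-size
 * frame on the wire.
 */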
/*
 * Check for present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
	struct qe_softc ssc;
	struct qe_softc *sc = &ssc;
	struct uba_attach_args *ua = aux;
	struct uba_softc *uh = device_private(parent);
	struct ubinfo ui;
	struct qe_ring *rp;
	int error;
#define	PROBESIZE	4096
	struct qe_ring *ring;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK|M_ZERO);
	memset(sc, 0, sizeof(*sc));
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;
	uh->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);
	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is loopbacked
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (void *)&ring[0];
	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
		free(ring, M_TEMP);
		return 0;
	}
	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;
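	/*
	 * Note the length convention used throughout this driver: buffer
	 * sizes are stored negated and counted in 16-bit words, so -64
	 * above describes the 128-byte setup buffer and -(1500/2) below
	 * a full-sized receive buffer.
	 */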
	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);
	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);
	/*
	 * All done with the bus resources.
	 */
	ubfree(uh, &ui);
	free(ring, M_TEMP);
	return 1;
}
/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;
	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char *)sc->sc_qedata) + sizeof(struct qe_cdata);
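	/*
	 * nullbuf now points at the ETHER_PAD_LEN zero bytes allocated
	 * just past the descriptors; once loaded into sc_nulldmamap below
	 * they are chained onto any outgoing frame shorter than the
	 * Ethernet minimum.
	 */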
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we can avoid this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			aprint_error(
			    ": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}
	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			aprint_error(
			    ": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			aprint_error(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error(
		    ": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    ": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);
	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;
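	/*
	 * Each list is one descriptor longer than the ring itself; the
	 * extra entry carries QE_CHAIN and points back at descriptor 0,
	 * so the device wraps around the ring on its own.
	 */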
	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = sc->sc_uh->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	/*
	 * Read out ethernet address and tell which type this card is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	aprint_normal(": %s, hardware address %s\n",
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */
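	/*
	 * Writing the low bit of the vector register and reading it back
	 * is how the probe above tells the two cards apart: the bit
	 * sticks only on a DELQA, while a DEQNA reads it back as zero.
	 */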
	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}
/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;
	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
		qc->qc_xmit[i].qe_addr_hi = 0;	/* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}
	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;
	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}
/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen, error;
	short orword, csr;
	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				error = bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else
				break;

			totlen += len;
			/* Word alignment calc */
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 1;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
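			/*
			 * The device transfers 16-bit words, so a buffer
			 * that starts or ends on an odd byte address must
			 * be rounded out to a whole word and flagged with
			 * QE_ODDBEGIN/QE_ODDEND so the chip skips the
			 * padding bytes.
			 */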
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
		if (totlen != buflen)
			panic("qestart: len fault");
		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5;	/* If transmit logic dies */
	splx(s);
}
static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;
	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);
	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
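			/*
			 * The received byte count is delivered in two
			 * pieces, one in each status word, and is biased
			 * by the 60-byte minimum frame length; the
			 * expression above reassembles it.
			 */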
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}
	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = NULL;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp);	/* Put more in the queue */
	}
	/*
	 * How can the receive list get invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}
/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running,
			 * stop it by disabling the receive mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc.)
			 */
			qe_setup(sc);
			break;
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_setup(sc);
			error = 0;
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	splx(s);
	return error;
}
/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}
	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return 0;
}
/*
 * Create a setup packet and put in queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, j, k, idx, s;
	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup));	/* Broadcast */
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i];	/* Own address */
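	/*
	 * The setup buffer is read column-wise: byte i of filter address
	 * n is stored at offset i * 8 + n within each 64-byte half of the
	 * buffer.  The own address therefore occupies column 1 above, and
	 * the multicast addresses below fill the remaining columns.
	 */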
	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3;
	k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1;
			k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;
	/*
	 * How is the DEQNA turned into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when more than
	 * 12 ethernet addresses are in use.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;
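	/*
	 * The buffer length of the setup frame doubles as a mode request:
	 * -64 words describes the plain 128-byte buffer, while the odd
	 * count -65 is what asks the device for promiscuous reception.
	 */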
	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;
	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}
/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Do a reset of the interface, to get it going again.
	 * Will it work by just restarting the transmit logic?
	 */
	qeinit(sc);
}