/*	$NetBSD: hd64570.c,v 1.40 2008/11/07 00:20:02 dyoung Exp $	*/

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer@flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */
/*
 * TODO:
 *
 *	o  teach the receive logic about errors, and about long frames that
 *	   span more than one input buffer.  (Right now, receive/transmit is
 *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	   This is currently 1504, which is large enough to hold the HDLC
 *	   header and the packet itself.  Packets which are too long are
 *	   silently dropped on transmit and silently dropped on receive.)
 *	o  write code to handle the msci interrupts, needed only for CD
 *	   and CTS changes.
 *	o  consider switching back to a "queue tx with DMA active" model which
 *	   should help sustain outgoing traffic
 *	o  through clever use of bus_dma*() functions, it should be possible
 *	   to map the mbuf's data area directly into a descriptor transmit
 *	   buffer, removing the need to allocate extra memory.  If, however,
 *	   we run out of descriptors for this, we will need to then allocate
 *	   one large mbuf, copy the fragmented chain into it, and put it onto
 *	   a single descriptor.
 *	o  use bus_dmamap_sync() with the right offset and lengths, rather
 *	   than cheating and always sync'ing the whole region.
 *
 *	o  perhaps allow rx and tx to be in more than one page
 *	   if not using DMA.  currently the assumption is that
 *	   rx uses a page and tx uses a page.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.40 2008/11/07 00:20:02 dyoung Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#endif
#endif

#ifdef ISO
#include <net/if_llc.h>
#include <netiso/iso.h>
#include <netiso/iso_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>
#define SCA_DEBUG_RX		0x0001
#define SCA_DEBUG_TX		0x0002
#define SCA_DEBUG_CISCO		0x0004
#define SCA_DEBUG_DMA		0x0008
#define SCA_DEBUG_RXPKT		0x0010
#define SCA_DEBUG_TXPKT		0x0020
#define SCA_DEBUG_INTR		0x0040
#define SCA_DEBUG_CLOCK		0x0080

#if 0
#define SCA_DEBUG_LEVEL		( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL		0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x;\
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static	void sca_msci_init(struct sca_softc *, sca_port_t *);
static	void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static	int sca_dmac_intr(sca_port_t *, u_int8_t);
static	int sca_msci_intr(sca_port_t *, u_int8_t);

static	void sca_get_packets(sca_port_t *);
static	int sca_frame_avail(sca_port_t *);
static	void sca_frame_process(sca_port_t *);
static	void sca_frame_read_done(sca_port_t *);

static	void sca_port_starttx(sca_port_t *);

static	void sca_port_up(sca_port_t *);
static	void sca_port_down(sca_port_t *);

static	int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
    struct rtentry *);
static	int sca_ioctl(struct ifnet *, u_long, void *);
static	void sca_start(struct ifnet *);
static	void sca_watchdog(struct ifnet *);

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);

#if SCA_DEBUG_LEVEL > 0
static	void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif
#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
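
/*
 * Illustrative example (not in the original source): in non-DMA mode the
 * card's memory is reached through a small paged window, so a flat bus
 * address is split into a page (selected via scu_set_page()) and an
 * offset within the window (sca_page_addr()).  If scu_pagemask were
 * 0x3fff (a hypothetical 16KB window), a buffer at bus address
 * 0x0002a000 would be accessed at window offset 0x2000.
 */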
static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}
/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}
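
/*
 * Example: the descriptor carries a 24-bit buffer address split across
 * two fields -- the low 16 bits in sd_bufp and bits 16-23 in sd_hbufp.
 * Per the masks and shifts above, a buffer at 0x00123456 is stored as
 * sd_hbufp = 0x12 and sd_bufp = 0x3456.
 */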
/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}
void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle more than 2 or less than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	struct timeval now;
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(ifp, DLT_HDLC, HDLC_HDRLEN);
#endif

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, device_xname(sc->sc_parent), port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 *	1 <= TD <= 256		TD is inc of 1
	 *	2 <= TD <= 512		TD is inc of 2
	 *	4 <= TD <= 1024		TD is inc of 4
	 *	...
	 *	512 <= TD <= 256*512	TD is inc of 512
	 *
	 * so note there are overlaps.  We lose precision
	 * as div increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 * tmc = chip / hz, but have tmc <= 256
	 */

	/* assume system clock is 9.8304MHz or 9830400Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}

	/* take care of the rounding */
	if (clock / tmc > hz)
		tmc++;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */

	return (div & 0xFF);
}
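
/*
 * Worked example for sca_msci_get_baud_rate_values() above (added for
 * illustration): for hz = 9600 the clock starts at 4915200.  The first
 * pass computes tmc = 4915200 / 9600 = 512 > 256, so the clock is
 * halved and div becomes 1; the second pass computes tmc = 2457600 /
 * 9600 = 256, and clock / tmc is exactly 9600, so the loop stops.  The
 * result is div = 1 with tmc = 256, stored as 0 in the 8-bit register
 * (0 == 256).
 */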

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
		     (  SCA_MD0_CRC_1
		      | SCA_MD0_CRC_CCITT
		      | SCA_MD0_CRC_ENABLE
		      | SCA_MD0_MODE_HDLC));

	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
		     (SCA_CTL_IDLC_PATTERN
		     | SCA_CTL_UDRNC_AFTER_FCS
		     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * the correct values here are important for avoiding underruns
	 * for any value less than or equal to TRC0 txrdy is activated
	 * which will start the dmac transfer to the fifo.
	 * for buffer size >= TRC1 + 1 txrdy is cleared which will stop DMA.
	 *
	 * thus if we are using a very fast clock that empties the fifo
	 * quickly, delays in the dmac starting to fill the fifo can
	 * lead to underruns so we want a fairly full fifo to still
	 * cause the dmac to start.  for cards with on board ram this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}
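
/*
 * Concrete reading of the TRC values set at the end of sca_msci_init()
 * above (added note): TRC0 = 0x00 asserts txrdy -- and thus starts the
 * DMAC -- only once the transmit fifo has drained completely, while
 * TRC1 = 0x1f clears txrdy once roughly 32 bytes are buffered.
 * sca_msci_intr() bumps TRC0 after an underrun so that the DMAC kicks
 * in while the fifo still holds data.
 */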

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/*
	dmac_write_1(scp, SCA_DIR1,
	    (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	*/
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * because cda can't be eda when starting we always
	 * have a single buffer gap between cda and eda
	 */
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
	    (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}
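
/*
 * Example of the CDA/EDA layout programmed by sca_dmac_rxinit() above:
 * with sp_nrxdesc == 16, CDA starts at descriptor 0 while EDA is parked
 * at descriptor 15, so the DMAC may fill descriptors 0-14 but must stop
 * before the one the driver has not yet processed -- the "single buffer
 * gap" described in the comment.
 */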

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct rtentry *rt0)
{
#ifdef ISO
	struct hdlc_llc_header *llc;
#endif
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int error;
	int s;
	short mflags;
	u_int len;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	error = 0;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
#ifdef ISO
	case AF_ISO:
		/*
		 * Add cisco llc serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_llc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		llc = mtod(m, struct hdlc_llc_header *);
		llc->hl_dsap = llc->hl_ssap = LLC_ISO_LSAP;
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish the header by setting the packet address */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;

	s = splnet();
#ifdef SCA_USE_FASTQ
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
#endif
	IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error != 0) {
		splx(s);
		ifp->if_collisions++;
		return (error);
	}

	ifp->if_obytes += len;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}
static int
sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *)data;
	error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == NULL) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(struct ifnet *ifp)
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse >= (scp->sp_ntxdesc - 1)
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
x
#endif

	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

#if NBPFILTER > 0
	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, mb_head);
#endif

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this used to limit us to 1?! - multi may be untested
	 * sp_ntxdesc used to be hard coded to 2 with claim of a too hard
	 * problem
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}
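
/*
 * Note the bracketing pattern in sca_start() above: when the descriptors
 * live in host memory (sc_usedma) each access sequence is wrapped in
 * bus_dmamap_sync() PRE.../POST... pairs, and when they live in on-card
 * memory the scu_page_on()/scu_set_page()/scu_page_off() hooks bracket
 * the access instead.  The whole region is synced every time; see the
 * TODO at the top of this file about using finer-grained offsets and
 * lengths.
 */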
static void
sca_watchdog(struct ifnet *ifp)
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int	ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);
		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
					     (isr1 & 0xf0) >> 4);

		/*
		 * check MSCI interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);
		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
			    (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}
*scp
, u_int8_t isr
)
1263 * Check transmit channel
1265 if (isr
& (SCA_ISR1_DMAC_TX0A
| SCA_ISR1_DMAC_TX0B
)) {
1266 SCA_DPRINTF(SCA_DEBUG_INTR
,
1267 ("TX INTERRUPT port %d\n", scp
->sp_port
));
1275 dsr
= dmac_read_1(scp
, SCA_DSR1
);
1276 dmac_write_1(scp
, SCA_DSR1
,
1277 dsr
| SCA_DSR_DEWD
);
1280 * filter out the bits we don't care about
1282 dsr
&= ( SCA_DSR_COF
| SCA_DSR_BOF
| SCA_DSR_EOT
);
1287 * check for counter overflow
1289 if (dsr
& SCA_DSR_COF
) {
1290 printf("%s: TXDMA counter overflow\n",
1291 scp
->sp_if
.if_xname
);
1293 scp
->sp_if
.if_flags
&= ~IFF_OACTIVE
;
1295 scp
->sp_txinuse
= 0;
1299 * check for buffer overflow
1301 if (dsr
& SCA_DSR_BOF
) {
1302 printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
1303 scp
->sp_if
.if_xname
,
1304 dmac_read_2(scp
, SCA_CDAL1
),
1305 dmac_read_2(scp
, SCA_EDAL1
),
1306 dmac_read_1(scp
, SCA_CPB1
));
1309 * Yikes. Arrange for a full
1310 * transmitter restart.
1312 scp
->sp_if
.if_flags
&= ~IFF_OACTIVE
;
1314 scp
->sp_txinuse
= 0;
1318 * check for end of transfer, which is not
1319 * an error. It means that all data queued
1320 * was transmitted, and we mark ourself as
1321 * not in use and stop the watchdog timer.
1323 if (dsr
& SCA_DSR_EOT
) {
1324 SCA_DPRINTF(SCA_DEBUG_TX
,
1325 ("Transmit completed. cda %x eda %x dsr %x\n",
1326 dmac_read_2(scp
, SCA_CDAL1
),
1327 dmac_read_2(scp
, SCA_EDAL1
),
1330 scp
->sp_if
.if_flags
&= ~IFF_OACTIVE
;
1332 scp
->sp_txinuse
= 0;
1335 * check for more packets
1337 sca_start(&scp
->sp_if
);
1342 * receive channel check
1344 if (isr
& (SCA_ISR1_DMAC_RX0A
| SCA_ISR1_DMAC_RX0B
)) {
1345 SCA_DPRINTF(SCA_DEBUG_INTR
, ("RX INTERRUPT port %d\n",
1346 (scp
== &scp
->sca
->sc_ports
[0] ? 0 : 1)));
1352 dsr
= dmac_read_1(scp
, SCA_DSR0
);
1353 dmac_write_1(scp
, SCA_DSR0
, dsr
| SCA_DSR_DEWD
);
1356 * filter out the bits we don't care about
1358 dsr
&= (SCA_DSR_EOM
| SCA_DSR_COF
1359 | SCA_DSR_BOF
| SCA_DSR_EOT
);
1366 if (dsr
& SCA_DSR_EOM
) {
1367 SCA_DPRINTF(SCA_DEBUG_RX
, ("Got a frame!\n"));
1369 sca_get_packets(scp
);
1373 * check for counter overflow
1375 if (dsr
& SCA_DSR_COF
) {
1376 printf("%s: RXDMA counter overflow\n",
1377 scp
->sp_if
.if_xname
);
1379 sca_dmac_rxinit(scp
);
1383 * check for end of transfer, which means we
1384 * ran out of descriptors to receive into.
1385 * This means the line is much faster than
1388 if (dsr
& (SCA_DSR_BOF
| SCA_DSR_EOT
)) {
1389 printf("%s: RXDMA buffer overflow\n",
1390 scp
->sp_if
.if_xname
);
1392 sca_dmac_rxinit(scp
);
1401 sca_msci_intr(sca_port_t
*scp
, u_int8_t isr
)
1405 /* get and clear the specific interrupt -- should act on it :)*/
1406 if ((st1
= msci_read_1(scp
, SCA_ST10
))) {
1407 /* clear the interrupt */
1408 msci_write_1(scp
, SCA_ST10
, st1
);
1410 if (st1
& SCA_ST1_UDRN
) {
1411 /* underrun -- try to increase ready control */
1412 trc0
= msci_read_1(scp
, SCA_TRC00
);
1414 printf("TX: underrun - fifo depth maxed\n");
1416 if ((trc0
+= 2) > 0x1f)
1418 SCA_DPRINTF(SCA_DEBUG_TX
,
1419 ("TX: udrn - incr fifo to %d\n", trc0
));
1420 msci_write_1(scp
, SCA_TRC00
, trc0
);
static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	struct ifqueue *ifq;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;

	t = time_uptime * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	    (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		scp->sp_if.if_ierrors++;
		return;
	}

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

#if NBPFILTER > 0
	if (scp->sp_if.if_bpf)
		bpf_mtap(scp->sp_if.if_bpf, m);
#endif

	scp->sp_if.if_ipackets++;

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ipintrq;
		schednetisr(NETISR_IP);
		break;
#endif	/* INET */
#ifdef INET6
	case HDLC_PROTOCOL_IPV6:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ip6intrq;
		schednetisr(NETISR_IPV6);
		break;
#endif	/* INET6 */
#ifdef ISO
	case HDLC_PROTOCOL_ISO:
		if (m->m_pkthdr.len < sizeof(struct hdlc_llc_header))
			goto dropit;
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_llc_header);
		m->m_data += sizeof(struct hdlc_llc_header);
		m->m_len -= sizeof(struct hdlc_llc_header);
		ifq = &clnlintrq;
		schednetisr(NETISR_ISO);
		break;
#endif	/* ISO */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("short CISCO packet %d, wanted %d\n",
				     len, CISCO_PKT_LEN));
			scp->sp_if.if_ierrors++;
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Received KA, mseq %d,"
				     " yseq %d, rel 0x%04x, t0"
				     " %04x, t1 %04x\n",
				     ntohl(cisco->par1), ntohl(cisco->par2),
				     ntohs(cisco->rel), ntohs(cisco->time0),
				     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));
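
			/*
			 * Example: t approximates uptime in milliseconds
			 * (time_uptime * 1000) and is carried in the
			 * keepalive as two 16-bit halves: t = 123456ms
			 * (0x0001e240) is sent as time0 = 0x0001 and
			 * time1 = 0xe240.
			 */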

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Unknown CISCO keepalive protocol 0x%04x\n",
			     ntohl(cisco->type)));

			scp->sp_if.if_noproto++;
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
		    ("Unknown/unexpected ethertype 0x%04x\n",
		     ntohs(hdlc->h_proto)));
		scp->sp_if.if_noproto++;
		goto dropit;
	}

	/* queue the packet */
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
	} else {
		IF_DROP(ifq);
		scp->sp_if.if_iqdrops++;
		goto dropit;
	}

	return;

dropit:
	if (m)
		m_freem(m);
	return;
}
#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
	       desc,
	       sca_desc_read_chainp(sc, desc),
	       sca_desc_read_bufp(sc, desc),
	       sca_desc_read_stat(sc, desc),
	       (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		       (sc->sc_usedma ? *p
		       : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		       sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}
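
/*
 * Example: with sp_nrxdesc == 16 and sp_rxstart == 5, a call to
 * sca_frame_read_done() leaves sp_rxend == 5 and sp_rxstart == 6, and
 * EDA is rewritten to point at descriptor 5 -- the just-freed slot
 * becomes the new limit the receive DMAC must not overwrite.
 */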

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
	struct timeval now;
	u_int8_t ier0, ier1;

	/*
	 * reset things
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_HIGH);

	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#if 0
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif
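
	/*
	 * Example: ISR0/ISR1 (and the matching IER0/IER1 enable bits)
	 * pack both ports into one byte, port 0 in the low nibble and
	 * port 1 in the high nibble; that is why the masks above are
	 * shifted left by 4 for port 1, and why sca_hardintr() splits
	 * the status with isr & 0x0f versus (isr & 0xf0) >> 4.
	 */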

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
	u_int8_t ier0, ier1;

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#if 0
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
		     (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			memcpy(mtod(m, void *), p, len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}
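
/*
 * Worked example: a measurement of cnt = 9830315 rounds as
 * ((9830315 + 100) / 200) * 200 = 9830400, the common 9.8304MHz
 * crystal; any count within +-100 ticks of it rounds to the same
 * base clock.
 */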

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf("  tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			       scp->sp_if.if_xname);
	}
}