/*	$NetBSD: if_le_ioasic.c,v 1.32 2009/03/18 17:06:50 cegger Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.32 2009/03/18 17:06:50 cegger Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

struct le_ioasic_softc {
	struct	am7990_softc sc_am7990;	/* glue to MI code */
	struct	lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

static int  le_ioasic_match(device_t, cfdata_t, void *);
static void le_ioasic_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(le_ioasic, sizeof(struct le_softc),
    le_ioasic_match, le_ioasic_attach, NULL, NULL);
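
/*
 * Note: the config glue above allocates sizeof(struct le_softc) (from
 * if_levar.h), not sizeof(struct le_ioasic_softc), yet this file casts
 * device_private() to the latter.  That only works while the two
 * layouts line up, which is what the XXX in the softc above is about.
 */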

static void le_ioasic_copytobuf_gap2(struct lance_softc *, void *, int, int);
static void le_ioasic_copyfrombuf_gap2(struct lance_softc *, void *, int, int);
static void le_ioasic_copytobuf_gap16(struct lance_softc *, void *, int, int);
static void le_ioasic_copyfrombuf_gap16(struct lance_softc *, void *,
	    int, int);
static void le_ioasic_zerobuf_gap16(struct lance_softc *, int, int);
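
/*
 * The IOASIC-attached LANCE is matched by its ROM module name,
 * "PMAD-BA " (blank-padded to TC_ROM_LLEN, hence the trailing space).
 */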
static int
le_ioasic_match(device_t parent, cfdata_t cf, void *aux)
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs 128KB boundary aligned 128KB chunk */
#define	LE_IOASIC_MEMSIZE	(128 * 1024)
#define	LE_IOASIC_MEMALIGN	(128 * 1024)
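
/*
 * The attach code below loads the chunk's bus address into
 * IOASIC_LANCE_DMAPTR exactly once, so the LANCE's entire view of
 * memory has to fit in a single naturally aligned 128KB window.
 */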

static void
le_ioasic_attach(device_t parent, device_t self, void *aux)
{
	struct le_ioasic_softc *sc = device_private(self);
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	struct ioasic_softc *iosc = device_private(parent);
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	uint32_t ssr;
	int rseg;
	void *le_iomem;

	le->sc_dev = self;
	ioasic_bst = iosc->sc_bst;
	ioasic_bsh = iosc->sc_bsh;
	dmat = sc->sc_dmat = iosc->sc_dmat;
	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error(": can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		aprint_error(": can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}
	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		aprint_error(": can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error(": can't load DMA map\n");
		goto bad;
	}
	/*
	 * Bind 128KB buffer with IOASIC DMA.
	 */
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);
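
	/*
	 * From here on, LANCE DMA flows through the 128KB chunk loaded
	 * above: IOASIC_LANCE_DMAPTR supplies the base address and the
	 * DMAEN_LANCE bit in the CSR enables the channel.
	 */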

	sc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&sc->sc_am7990,
	    (uint8_t *)iosc->sc_base + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * doing an extra byte.
 */
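
/*
 * Layout sketch: the LANCE sees a contiguous run of 16-bit words, but
 * on the host side each word occupies the low half of a 32-bit slot:
 *
 *	host byte offset:   0  1   2  3   4  5   6  7  ...
 *	contents:           d0 d1  (pad)  d2 d3  (pad) ...
 *
 * so (for even boff) chip buffer byte offset boff corresponds to host
 * 16-bit-word index boff, i.e. host byte offset boff * 2, which is
 * what the bptr arithmetic below computes.
 */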

static void
le_ioasic_copytobuf_gap2(struct lance_softc *sc, void *fromv, int boff, int len)
{
	volatile void *buf = sc->sc_mem;
	uint8_t *from = fromv;
	volatile uint16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile uint16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile uint16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (uint16_t)*from;
}

static void
le_ioasic_copyfrombuf_gap2(struct lance_softc *sc, void *tov, int boff, int len)
{
	volatile void *buf = sc->sc_mem;
	uint8_t *to = tov;
	volatile uint16_t *bptr;
	uint16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile uint16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile uint16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
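
/*
 * Address math shared by the three gap16 routines: chip buffer offset
 * boff = 16*c + r (c = chunk index, r < 16), and each 16-byte chunk
 * takes 32 host bytes, so the host chunk base is (boff << 1) & ~0x1f
 * and the byte within the chunk is boff & 0xf.  Example: boff = 0x18
 * gives chunk base (0x30 & ~0x1f) = 0x20 and in-chunk offset 0x8.
 */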

static void
le_ioasic_copytobuf_gap16(struct lance_softc *sc, void *fromv, int boff,
    int len)
{
	uint8_t *buf = sc->sc_mem;
	uint8_t *from = fromv;
	uint8_t *bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		memcpy(bptr + boff, from, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(uint32_t) - 1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
		do {
			uint32_t *dst = (uint32_t *)bptr;
			uint16_t t0;
			uint32_t t1, t2, t3, t4;

			/* read from odd-16-bit-aligned, cached src */
			t0 = *(uint16_t *)(from + 0);
			t1 = *(uint32_t *)(from + 2);
			t2 = *(uint32_t *)(from + 6);
			t3 = *(uint32_t *)(from + 10);
			t4 = *(uint16_t *)(from + 14);

			/* DMA buffer is uncached on mips */
			dst[0] =         t0 | (t1 << 16);
			dst[1] = (t1 >> 16) | (t2 << 16);
			dst[2] = (t2 >> 16) | (t3 << 16);
			dst[3] = (t3 >> 16) | (t4 << 16);

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
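
		/*
		 * Each dst longword above combines the top half of one
		 * source read with the bottom half of the next, so the
		 * uncached DMA buffer sees only aligned 32-bit stores
		 * and the cached source only aligned 16- and 32-bit
		 * loads.
		 */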

		case 0:
		do {
			uint32_t *src = (uint32_t *)from;
			uint32_t *dst = (uint32_t *)bptr;
			uint32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

		default:
			/* Does odd-aligned case ever happen? */
		do {
			memcpy(bptr, from, 16);
			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
	}
	if (len)
		memcpy(bptr, from, len);
}

static void
le_ioasic_copyfrombuf_gap16(struct lance_softc *sc, void *tov, int boff,
    int len)
{
	uint8_t *buf = sc->sc_mem;
	uint8_t *to = tov;
	uint8_t *bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff; source of copy is subsequently 16-byte aligned. */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		memcpy(to, bptr + boff, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}

	if (len >= 16)
		switch ((u_long)to & (sizeof(uint32_t) - 1)) {
		case 2:
			/*
			 * to is aligned to an odd 16-bit boundary.  Ethernet
			 * headers make this the dominant case (98% or more).
			 */
		do {
			uint32_t *src = (uint32_t *)bptr;
			uint32_t t0, t1, t2, t3;

			/* read from uncached aligned DMA buf */
			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

			/* write to odd-16-bit-word aligned dst */
			*(uint16_t *)(to + 0) = (uint16_t)t0;
			*(uint32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
			*(uint32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
			*(uint32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
			*(uint16_t *)(to + 14) = (t3 >> 16);
			bptr += 32;
			to += 16;
			len -= 16;
		} while (len > 16);
		break;
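
		/*
		 * Mirror of the copyto case: aligned 32-bit loads from
		 * the uncached DMA buffer, with the 16 bytes leaving as
		 * two 16-bit stores plus three 32-bit stores on the
		 * cached side, none of them misaligned.
		 */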

		case 0:
			/* 32-bit aligned copy.  Rare. */
		do {
			uint32_t *src = (uint32_t *)bptr;
			uint32_t *dst = (uint32_t *)to;
			uint32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;

		/* XXX Does odd-byte-aligned case ever happen? */
		default:
		do {
			memcpy(to, bptr, 16);
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;
	}
	if (len)
		memcpy(to, bptr, len);
}

static void
le_ioasic_zerobuf_gap16(struct lance_softc *sc, int boff, int len)
{
	uint8_t *buf = sc->sc_mem;
	uint8_t *bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		memset(bptr + boff, 0, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}