/*	$NetBSD: if_le_ioasic.c,v 1.32 2009/03/18 17:06:50 cegger Exp $	*/
/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.32 2009/03/18 17:06:50 cegger Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>
struct le_ioasic_softc {
	struct am7990_softc sc_am7990;	/* glue to MI code */
	struct lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

static int  le_ioasic_match(device_t, cfdata_t, void *);
static void le_ioasic_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(le_ioasic, sizeof(struct le_softc),
    le_ioasic_match, le_ioasic_attach, NULL, NULL);
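
/*
 * Note: the cfattach allocates sizeof(struct le_softc) (if_levar.h),
 * while le_ioasic_attach() treats the softc as struct le_ioasic_softc;
 * the XXX comment in the structure above records that the two layouts
 * must stay in sync for this to be safe.
 */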

static void le_ioasic_copytobuf_gap2(struct lance_softc *, void *, int, int);
static void le_ioasic_copyfrombuf_gap2(struct lance_softc *, void *, int, int);
static void le_ioasic_copytobuf_gap16(struct lance_softc *, void *, int, int);
static void le_ioasic_copyfrombuf_gap16(struct lance_softc *, void *,
    int, int);
static void le_ioasic_zerobuf_gap16(struct lance_softc *, int, int);
static int
le_ioasic_match(device_t parent, cfdata_t cf, void *aux)
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}
/* IOASIC LANCE DMA needs 128KB boundary aligned 128KB chunk */
#define	LE_IOASIC_MEMSIZE	(128 * 1024)
#define	LE_IOASIC_MEMALIGN	(128 * 1024)
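
/*
 * The attach code below asks bus_dmamem_alloc() for a single segment
 * of LE_IOASIC_MEMSIZE bytes with LE_IOASIC_MEMALIGN alignment, so the
 * resulting dm_segs[0].ds_addr always satisfies this constraint.
 */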
static void
le_ioasic_attach(device_t parent, device_t self, void *aux)
{
	struct le_ioasic_softc *sc = device_private(self);
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	struct ioasic_softc *iosc = device_private(parent);
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	uint32_t ssr;
	int rseg;
	void *le_iomem;

	le->sc_dev = self;
	ioasic_bst = iosc->sc_bst;
	ioasic_bsh = iosc->sc_bsh;
	dmat = sc->sc_dmat = iosc->sc_dmat;
	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error(": can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		aprint_error(": can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}
	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		aprint_error(": can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error(": can't load DMA map\n");
		goto bad;
	}
	/*
	 * Bind 128KB buffer with IOASIC DMA.
	 */
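	/*
	 * IOASIC_DMA_ADDR() converts the buffer's physical address into
	 * the encoded form the IOASIC's LANCE DMA pointer register expects.
	 */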
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

	sc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;
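	/*
	 * The gap2 routines above serve the descriptor rings and the
	 * gap16 routines serve the packet buffers; the two layouts are
	 * described with the access functions below.
	 */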

	dec_le_common_attach(&sc->sc_am7990,
	    (uint8_t *)iosc->sc_base + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}
/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * doing an extra byte.
 */
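
/*
 * Layout example: for an even byte offset boff, the 16-bit data word
 * lives at ((uint16_t *)buf) + boff, i.e. at buffer byte offset
 * boff * 2, with the following two bytes left as pad:
 *
 *	data bytes 0-1 -> buffer bytes 0-1 (bytes 2-3 are pad)
 *	data bytes 2-3 -> buffer bytes 4-5 (bytes 6-7 are pad)
 *
 * Hence the loops below advance bptr by two uint16_t entries (four
 * bytes) for every two data bytes, stepping over the pad word.
 */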
void
le_ioasic_copytobuf_gap2(struct lance_softc *sc, void *fromv, int boff, int len)
{
	volatile void *buf = sc->sc_mem;
	uint8_t *from = fromv;
	volatile uint16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile uint16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile uint16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (uint16_t)*from;
}
void
le_ioasic_copyfrombuf_gap2(struct lance_softc *sc, void *tov, int boff, int len)
{
	volatile void *buf = sc->sc_mem;
	uint8_t *to = tov;
	volatile uint16_t *bptr;
	uint16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile uint16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile uint16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}
/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
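
/*
 * Each 16 data bytes thus occupy one 32-byte buffer chunk, so data
 * byte offset boff maps to buffer offset
 *
 *	(boff / 16) * 32 + (boff % 16)
 *
 * which the functions below compute as ((boff << 1) & ~0x1f) for the
 * chunk base and (boff & 0xf) for the offset within the chunk.
 */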
void
le_ioasic_copytobuf_gap16(struct lance_softc *sc, void *fromv, int boff,
    int len)
{
	uint8_t *buf = sc->sc_mem;
	uint8_t *from = fromv;
	uint8_t *bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
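	/*
	 * e.g. boff = 0x38 (data byte 56): bptr points at buffer offset
	 * (0x38 << 1) & ~0x1f = 0x60 (the base of 32-byte chunk 3) and
	 * boff becomes 0x38 & 0xf = 8, the offset within that chunk.
	 */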

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		memcpy(bptr + boff, from, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(uint32_t) - 1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
			do {
				uint32_t *dst = (uint32_t *)bptr;
				uint16_t t0;
				uint32_t t1, t2, t3, t4;

				/* read from odd-16-bit-aligned, cached src */
				t0 = *(uint16_t *)(from + 0);
				t1 = *(uint32_t *)(from + 2);
				t2 = *(uint32_t *)(from + 6);
				t3 = *(uint32_t *)(from + 10);
				t4 = *(uint16_t *)(from + 14);

				/* DMA buffer is uncached on mips */
				dst[0] = t0 | (t1 << 16);
				dst[1] = (t1 >> 16) | (t2 << 16);
				dst[2] = (t2 >> 16) | (t3 << 16);
				dst[3] = (t3 >> 16) | (t4 << 16);

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		case 0:
			do {
				uint32_t *src = (uint32_t *)from;
				uint32_t *dst = (uint32_t *)bptr;
				uint32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		default:
			/* Does odd-aligned case ever happen? */
			do {
				memcpy(bptr, from, 16);
				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;
		}
	if (len)
		memcpy(bptr, from, len);
}
void
le_ioasic_copyfrombuf_gap16(struct lance_softc *sc, void *tov, int boff,
    int len)
{
	uint8_t *buf = sc->sc_mem;
	uint8_t *to = tov;
	uint8_t *bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
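	/* Same data/pad address mapping as in le_ioasic_copytobuf_gap16(). */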

	/* Dispose of boff; source of copy is subsequently 16-byte aligned. */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		memcpy(to, bptr + boff, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
		switch ((u_long)to & (sizeof(uint32_t) - 1)) {
		case 2:
			/*
			 * to is aligned to an odd 16-bit boundary.  Ethernet
			 * headers make this the dominant case (98% or more).
			 */
			do {
				uint32_t *src = (uint32_t *)bptr;
				uint32_t t0, t1, t2, t3;

				/* read from uncached aligned DMA buf */
				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

				/* write to odd-16-bit-word aligned dst */
				*(uint16_t *)(to + 0) = (uint16_t)t0;
				*(uint32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
				*(uint32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
				*(uint32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
				*(uint16_t *)(to + 14) = (t3 >> 16);
				bptr += 32;
				to += 16;
				len -= 16;
			} while (len > 16);
			break;
		case 0:
			/* 32-bit aligned copy.  Rare. */
			do {
				uint32_t *src = (uint32_t *)bptr;
				uint32_t *dst = (uint32_t *)to;
				uint32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;
		/* XXX Does odd-byte-aligned case ever happen? */
		default:
			do {
				memcpy(to, bptr, 16);
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;
		}
	if (len)
		memcpy(to, bptr, len);
}
void
le_ioasic_zerobuf_gap16(struct lance_softc *sc, int boff, int len)
{
	uint8_t *buf = sc->sc_mem;
	uint8_t *bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
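	/*
	 * The first pass may zero only the tail of a 16-byte chunk
	 * (16 - boff bytes); every later pass zeroes a full 16-byte
	 * data chunk, skipping the 16 pad bytes in between.
	 */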
	xfer = min(len, 16 - boff);
	while (len > 0) {
		memset(bptr + boff, 0, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}