/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/pbm.h>

#include "iommu_common.h"

#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))

/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define pci_iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct pci_iommu *iommu)
{
	unsigned long tag;
	int entry;

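	/* Editorial note: iommu_flush holds the physical address of the IOMMU
	 * flush register; the constant below is presumably the offset from
	 * that register to the controller's IOTLB tag diagnostic array of 16
	 * entries, 8 bytes apart.  Writing zero to every tag invalidates the
	 * whole IOTLB.
	 */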
	tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
	for (entry = 0; entry < 16; entry++) {
		pci_iommu_write(tag, 0);
		tag += 8;
	}

	/* Ensure completion of previous PIO writes. */
	(void) pci_iommu_read(iommu->write_complete_reg);

	/* Now update everyone's flush point. */
	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid; instead they
 * are pointed at a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte) \
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
{
	int i;

	tsbsize /= sizeof(iopte_t);

	for (i = 0; i < tsbsize; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);
}

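/* Editorial note: as used below, the IOMMU TSB is carved into PBM_NCLUSTERS
 * power-of-two clusters; cluster CNUM starts at index
 * (CNUM << (page_table_sz_bits - PBM_LOGCLUSTERS)) and serves streaming
 * allocations of 2^CNUM pages.  Cluster zero is shared with consistent
 * mappings, which are handed out from its end downwards (see
 * alloc_consistent_cluster() and lowest_consistent_map).
 */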
static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first;
	unsigned long cnum, ent, flush_point;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	iopte = (iommu->page_table +
		 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	for (;;) {
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			if ((iopte + (1 << cnum)) >= limit)
				ent = 0;
			else
				ent = ent + 1;
			iommu->alloc_info[cnum].next = ent;
			if (ent == flush_point)
				__iommu_flushall(iommu);
			break;
		}
		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table +
				 (cnum <<
				  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
			ent = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		if (iopte == first)
			goto bad;
	}

	/* I've got your streaming cluster right here buddy boy... */
	return iopte;

bad:
	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}

static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
				   unsigned long npages, unsigned long ctx)
{
	unsigned long cnum, ent;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;

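	/* Editorial note: the paired shifts below in effect compute the slot
	 * index within cluster CNUM, roughly
	 * (base % cluster_span) >> (IO_PAGE_SHIFT + cnum), where BASE is the
	 * bus address relative to page_table_map_base.
	 */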
	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}

/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
	while (iopte > iommu->page_table) {
		iopte--;
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			unsigned long tmp = npages;

			while (--tmp) {
				iopte--;
				if (!IOPTE_IS_DUMMY(iommu, iopte))
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}

/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page, ctx;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	{
		int i;
		u32 daddr = *dma_addrp;

		npages = size >> IO_PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			pci_iommu_write(iommu->iommu_flush, daddr);
			daddr += IO_PAGE_SIZE;
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

/* Free and unmap a consistent DMA translation. */
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages, i, ctx;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	if ((iopte - iommu->page_table) ==
	    iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = (iommu->page_table +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
		while (walk < limit) {
			if (!IOPTE_IS_DUMMY(iommu, walk))
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	/* Data for consistent mappings cannot enter the streaming
	 * buffers, so we only need to update the TSB.  We flush
	 * the IOMMU here as well to prevent conflicts with the
	 * streaming mapping deferred tlb flush scheme.
	 */

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;

	for (i = 0; i < npages; i++, iopte++)
		iopte_make_dummy(iommu, iopte);

	if (iommu->iommu_ctxflush) {
		pci_iommu_write(iommu->iommu_ctxflush, ctx);
	} else {
		for (i = 0; i < npages; i++) {
			u32 daddr = dvma + (i << IO_PAGE_SHIFT);

			pci_iommu_write(iommu->iommu_flush, daddr);
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

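/* Editorial sketch (not part of the original file): a driver would typically
 * pair the two routines above along these lines, where the 4K size is just
 * an example:
 *
 *	void *cpu_buf;
 *	dma_addr_t dma_handle;
 *
 *	cpu_buf = pci_alloc_consistent(pdev, 4096, &dma_handle);
 *	if (cpu_buf == NULL)
 *		return -ENOMEM;
 *	... hand dma_handle to the device, touch cpu_buf from the CPU ...
 *	pci_free_consistent(pdev, 4096, cpu_buf, dma_handle);
 */
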
/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}

/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, i, ctx;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
		       bus_addr, sz, __builtin_return_address(0));
#endif
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled) {
		u32 vaddr = bus_addr;

		PCI_STC_FLUSHFLAG_INIT(strbuf);
		if (strbuf->strbuf_ctxflush &&
		    iommu->iommu_ctxflush) {
			unsigned long matchreg, flushreg;

			flushreg = strbuf->strbuf_ctxflush;
			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
			do {
				pci_iommu_write(flushreg, ctx);
			} while(((long)pci_iommu_read(matchreg)) < 0L);
		} else {
			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
		}

		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
		(void) pci_iommu_read(iommu->write_complete_reg);
		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
			membar("#LoadLoad");
	}

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

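/* Editorial sketch (not part of the original file): typical streaming-mode
 * usage of the two routines above, for a hypothetical buffer BUF of LEN
 * bytes being sent to the device:
 *
 *	dma_addr_t mapping;
 *
 *	mapping = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	if (mapping == PCI_DMA_ERROR_CODE)
 *		return -ENOMEM;
 *	... point the device at "mapping" and let it run ...
 *	pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE);
 *
 * Device-to-CPU transfers use PCI_DMA_FROMDEVICE instead and, if the mapping
 * stays live across transfers, call pci_dma_sync_single_for_cpu() (below)
 * before the CPU reads the buffer.
 */
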
#define SG_ENT_PHYS_ADDRESS(SG) \
	(__pa(page_address((SG)->page)) + (SG)->offset)

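/* Editorial note: fill_sg() below runs on a scatterlist that prepare_sg()
 * has already coalesced; the first NUSED entries carry the dma_address and
 * dma_length of each DMA segment, while the original NELEMS entries still
 * describe the underlying physical buffers.  For each DMA segment it walks
 * the physical entries and emits one IOPTE per IO page, merging physically
 * contiguous pieces as it goes.
 */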
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}

/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output; it has proven
 * hard to keep this routine from spilling variables to stack slots.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_map_single(pdev,
				       (page_address(sglist->page) + sglist->offset),
				       sglist->length, direction);
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Choose a context if necessary. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;

	/* Step 5: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;
	fill_sg (base, sglist, used, nelems, iopte_protection);
#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}

/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled) {
		u32 vaddr = (u32) bus_addr;

		PCI_STC_FLUSHFLAG_INIT(strbuf);
		if (strbuf->strbuf_ctxflush &&
		    iommu->iommu_ctxflush) {
			unsigned long matchreg, flushreg;

			flushreg = strbuf->strbuf_ctxflush;
			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
			do {
				pci_iommu_write(flushreg, ctx);
			} while(((long)pci_iommu_read(matchreg)) < 0L);
		} else {
			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
		}

		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
		(void) pci_iommu_read(iommu->write_complete_reg);
		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
			membar("#LoadLoad");
	}

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

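/* Editorial sketch (not part of the original file): typical scatter-gather
 * usage of pci_map_sg()/pci_unmap_sg(), for a hypothetical SG array with
 * NENTS entries already pointing at the data pages (error handling elided):
 *
 *	int i, count;
 *
 *	count = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
 *	for (i = 0; i < count; i++)
 *		... program the device with sg_dma_address(&sg[i])
 *		    and sg_dma_len(&sg[i]) ...
 *	... wait for the transfer to complete ...
 *	pci_unmap_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
 *
 * The device is programmed with the COUNT coalesced segments returned by the
 * map call, while the unmap call is passed the original NENTS.
 */
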
/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	PCI_STC_FLUSHFLAG_INIT(strbuf);
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		unsigned long matchreg, flushreg;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
		do {
			pci_iommu_write(flushreg, ctx);
		} while(((long)pci_iommu_read(matchreg)) < 0L);
	} else {
		unsigned long i;

		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
	}

	/* Step 3: Perform flush synchronization sequence. */
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);
	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
		membar("#LoadLoad");

	spin_unlock_irqrestore(&iommu->lock, flags);
}

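/* Editorial sketch (not part of the original file): for a long-lived
 * streaming mapping, the routine above is what lets the CPU safely look at
 * data the device has just DMA'd in without tearing the mapping down:
 *
 *	pci_dma_sync_single_for_cpu(pdev, mapping, len, PCI_DMA_FROMDEVICE);
 *	... the CPU may now read the buffer ...
 *
 * where "mapping" and "len" are the values used with pci_map_single().
 */
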
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	PCI_STC_FLUSHFLAG_INIT(strbuf);
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		unsigned long matchreg, flushreg;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
		do {
			pci_iommu_write(flushreg, ctx);
		} while (((long)pci_iommu_read(matchreg)) < 0L);
	} else {
		unsigned long i, npages;
		u32 bus_addr;

		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;

		for (i = 1; i < nelems; i++)
			if (!sglist[i].dma_length)
				break;
		i--;
		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
	}

	/* Step 3: Perform flush synchronization sequence. */
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);
	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
		membar("#LoadLoad");

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
	struct pci_dev *ali_isa_bridge;
	u8 val;

	/* ALI sound chips generate 31-bits of DMA, a special register
	 * determines what bit 31 is emitted as.
	 */
	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);

	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (set_bit)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
}

int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct pcidev_cookie *pcp = pdev->sysdata;
		struct pci_iommu *iommu = pcp->pbm->iommu;

		dma_addr_mask = iommu->dma_addr_mask;

		if (pdev->vendor == PCI_VENDOR_ID_AL &&
		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
		    device_mask == 0x7fffffff) {
			ali_sound_dma_hack(pdev,
					   (dma_addr_mask & 0x80000000) != 0);
			return 1;
		}
	}

	if (device_mask >= (1UL << 32UL))
		return 0;

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}
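
/* Editorial note (not part of the original file): drivers normally reach
 * pci_dma_supported() indirectly, through pci_set_dma_mask(), e.g.:
 *
 *	if (pci_set_dma_mask(pdev, 0xffffffffUL))
 *		return -ENODEV;
 */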