arch/sparc/kernel/iommu.c (linux-2.6/linux-mips.git, blob 5fad94950e7680b49a453686420bc9a85e316d78)
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX) \
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space.  If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning. */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	bitmap_clear(arena->map, entry, npages);
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}

static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);

int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);
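
For context, drivers never call sun4u_dma_ops directly: they use the generic DMA API (dma_map_page(), dma_sync_single_for_cpu(), dma_unmap_page(), and friends), which on sparc64 dispatches through the dma_ops pointer exported above. The sketch below is a minimal, hypothetical driver-side round trip illustrating that call path; example_dma_roundtrip(), the dev pointer, and the single-page buffer are illustrative assumptions, not part of this file.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical driver-side sketch: map one page for device access, sync it
 * back for the CPU, then tear the mapping down.  On sun4u hardware each
 * generic DMA API call below lands in the corresponding dma_4u_* handler
 * from sun4u_dma_ops.
 */
static int example_dma_roundtrip(struct device *dev, struct page *page)
{
	dma_addr_t bus_addr;

	/* dma_4u_map_page(): allocates IOPTEs and returns a bus address */
	bus_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, bus_addr))
		return -ENOMEM;

	/* ... device performs DMA to/from bus_addr here ... */

	/* dma_4u_sync_single_for_cpu(): flushes the streaming buffers */
	dma_sync_single_for_cpu(dev, bus_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

	/* dma_4u_unmap_page(): points the IOPTEs back at the dummy page */
	dma_unmap_page(dev, bus_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	return 0;
}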