/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

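/* The IOMMU control registers are accessed with physically addressed
 * loads and stores through ASI_PHYS_BYPASS_EC_E, so the __reg arguments
 * below are physical addresses.  The "memory" clobber on the read keeps
 * the compiler from reordering it past the register writes it is meant
 * to synchronize with.
 */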
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})

#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

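/* Flush the entire IOMMU TLB.  If the hardware provides a
 * flush-invalidate register, a single write suffices; otherwise each of
 * the sixteen 8-byte diagnostic tag registers is cleared by hand.  The
 * flush is only known complete once a read of the write completion
 * register has returned.
 */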
/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the
 * 'handle' facility it must all be done in one pass while under the
 * iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever
 * advance the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

again:

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.  */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

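/* Freeing only clears bits in the arena bitmap.  Callers rewrite the
 * corresponding IOPTEs (pointing them at the dummy page) and perform
 * any streaming cache flush themselves.
 */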
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	bitmap_clear(arena->map, entry, npages);
}

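/* The arena map below is sized at one bit per TSB entry
 * (num_tsb_entries / 8 bytes, rounded up to an 8-byte multiple), which
 * assumes the TSB sizes passed in are multiples of 64 entries; the
 * power-of-two sizes used by callers appear to satisfy this.
 */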
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

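/* Allocate a run of IO pages and return a pointer to the first IOPTE,
 * or NULL on failure.  The caller must hold the IOMMU lock, as
 * iommu_range_alloc() depends on it.
 */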
static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

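/* Hand out a DMA context number in [1, IOMMU_NUM_CTXS).  The search
 * begins at the lowest context known to be free and wraps around once;
 * if every context is in use we fall back to context 0, which is
 * shared and never freed.
 */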
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

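/* Coherent allocations are backed by ordinary pages mapped with
 * IOPTE_CONSISTENT attributes (no streaming buffer), so they never
 * require flushing.  Requests of order >= 10 pages are simply refused.
 */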
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

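/* Note that the IOPTEs are not returned to the dummy page here; the
 * arena range is released and the entries are overwritten wholesale by
 * the next allocation that reuses them.
 */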
static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

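/* Map a single page for streaming DMA.  A DMA context is allocated
 * when the IOMMU supports context flushing, the IOPTEs are marked
 * streaming when a streaming buffer is present, and IOPTE_WRITE is set
 * unless the transfer is device-bound only (DMA_TO_DEVICE).
 */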
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}

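/* Flush a mapping out of the streaming buffer.  When both the
 * streaming buffer and the IOMMU support context flushing, a context
 * flush is issued and the context-match register polled until no tags
 * for the context remain, re-issuing the flush for any that linger; if
 * the tags never drain, or context flushing is unavailable, we fall
 * back to flushing page by page.  The final flush-flag handshake (see
 * STC_FLUSHFLAG_*) waits for dirty data to actually reach memory and
 * is skipped for DMA_TO_DEVICE, where the device cannot have dirtied
 * the streaming cache.
 */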
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

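/* Tear down a single streaming mapping: flush the streaming buffer if
 * one is in use, point the IOPTEs back at the dummy page, then release
 * the arena range and the DMA context.
 */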
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

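/* Map a scatterlist.  Each segment gets its own range allocation (the
 * shared 'handle' keeps the allocations in one forward pass under the
 * lock), and adjacent segments are merged whenever the resulting bus
 * addresses are contiguous and the merge respects the device's segment
 * size and boundary limits, following the ppc64 scheme this allocator
 * derives from.
 */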
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

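/* Unmap a scatterlist.  The context is recovered from the first
 * mapping's IOPTE, since all mappings of one SG list share it, and
 * each segment is flushed and returned to the arena until a
 * zero-length entry marks the end of the mapped portion.
 */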
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

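/* The sync-for-cpu operations only matter when a streaming buffer is
 * enabled: they push whatever the device left in the streaming cache
 * out to memory before the CPU looks at the buffer.
 */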
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

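/* Note that no sync_*_for_device methods appear below; on sun4u those
 * directions are apparently no-ops, so only the CPU-side sync hooks
 * are populated.
 */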
static struct dma_map_ops sun4u_dma_ops = {
	.alloc_coherent = dma_4u_alloc_coherent,
	.free_coherent = dma_4u_free_coherent,
	.map_page = dma_4u_map_page,
	.unmap_page = dma_4u_unmap_page,
	.map_sg = dma_4u_map_sg,
	.unmap_sg = dma_4u_unmap_sg,
	.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

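/* A device can DMA through this IOMMU if its mask covers the IOMMU's
 * own DMA address mask.  Masks wider than 32 bits are rejected
 * outright; narrower masks that still fall short of the IOMMU's are
 * referred to pci64_dma_supported() when the device sits on a PCI bus.
 */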
extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);

int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);