Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
arch/sparc64/kernel/iommu.c

/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)
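
/* The flush flag is a word in host memory.  strbuf_flush() clears it with
 * STC_FLUSHFLAG_INIT(), writes its physical address to the streaming-buffer
 * flush-sync register, and then polls STC_FLUSHFLAG_SET() until the hardware
 * marks the word non-zero to signal that the flush has completed.
 */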

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})

#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}
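
/* IOPTE protection templates.  Consistent mappings are valid, cacheable
 * entries that do not pass through the streaming cache; streaming mappings
 * additionally set IOPTE_STBUF, so their data sits in the streaming buffer
 * and must be flushed out explicitly (see strbuf_flush() below).  The
 * context number, when used, lives in the IOPTE_CONTEXT field at bit 47.
 */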

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.  */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	iommu_area_free(arena->map, entry, npages);
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = get_zeroed_page(GFP_KERNEL);
	if (!iommu->dummy_page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}
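
/* DMA contexts tag groups of mappings so the IOMMU and streaming buffer can
 * flush them together.  Context 0 means "no context": allocation starts at
 * ctx_lowest_free (initialized to 1 in iommu_table_init()) and falls back to
 * 0 with a warning once the bitmap is exhausted.
 */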

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
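
/* Coherent allocations are backed by physically contiguous pages and mapped
 * with IOPTE_CONSISTENT(0) entries, so they never involve the streaming
 * buffer and need no explicit synchronization.
 */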

static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
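
/* Push any dirty data for the npages pages starting at vaddr out of the
 * streaming cache.  A context-based flush is used when both the IOMMU and
 * streaming buffer support it, otherwise each page is flushed individually;
 * for directions other than DMA_TO_DEVICE we then wait on the flush flag to
 * confirm the hardware has finished writing the data back to memory.
 */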

static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, i;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (i = 0; i < npages; i++)
				iopte_make_dummy(iommu, base + i);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);
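
/* Example (illustrative only, not part of the original file): on sun4u a
 * driver reaches these operations through the generic DMA API, which
 * dispatches through the dma_ops pointer exported above.  The device and
 * buffer names below are hypothetical.
 *
 *	void *buf = kmalloc(4096, GFP_KERNEL);
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	...device performs DMA from 'handle'...
 *	dma_unmap_single(dev, handle, 4096, DMA_TO_DEVICE);
 */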