/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)
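
/* The flush flag is a word in ordinary memory: STC_FLUSHFLAG_INIT zeroes
 * it, the streaming cache hardware sets it non-zero once a requested
 * flush has completed, and STC_FLUSHFLAG_SET is what the CPU polls in
 * strbuf_flush() below to detect completion.
 */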
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}
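
/* Two flush mechanisms are handled above: chips with a flush-invalidate
 * register are flushed with a single write, while older ones are flushed
 * by clearing each of the sixteen 8-byte IOMMU TLB tag entries by hand.
 */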
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
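
/* Keeping torn-down IOPTEs valid but pointed at a private dummy page
 * means a late or errant DMA lands harmlessly in the dummy page rather
 * than taking a translation error, and it is what lets sun4u defer
 * IOMMU flushes to iommu_range_alloc() instead of flushing on every
 * unmap.
 */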
static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}
/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space.  If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.  */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
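
/* Typical usage (cf. alloc_npages() below): callers hold iommu->lock,
 * allocate a run of entries, then convert the entry index into a bus
 * address:
 *
 *	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 *	dma_addr = iommu->page_table_map_base + (entry << IO_PAGE_SHIFT);
 */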
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	iommu_area_free(arena->map, entry, npages);
}
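
/* Note that this only releases bits in the arena bitmap; callers remain
 * responsible for resetting the IOPTEs themselves (via iopte_make_dummy())
 * and for any streaming-buffer flushing the mapping required.
 */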
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = get_zeroed_page(GFP_KERNEL);
	if (!iommu->dummy_page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}
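
/* Sizing example: with 8K IO pages (IO_PAGE_SHIFT == 13) and 8-byte
 * IOPTEs, a 64K TSB gives num_tsb_entries = 65536 / 8 = 8192 entries,
 * an 8192 / 8 = 1K arena bitmap, and 8192 * 8K = 64MB of mappable DMA
 * virtual space starting at dma_offset.
 */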
static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}
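
/* Context 0 is reserved to mean "no context": ctx_lowest_free starts at
 * 1, the wraparound search above begins at bit 1, and on complete
 * exhaustion we fall back to returning 0, which iommu_free_ctx()
 * silently ignores.
 */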
static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
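
/* The returned handle is simply the IOPTE's index scaled up to a bus
 * address: with an illustrative page_table_map_base of 0xc0000000 and
 * 8K IO pages, the third page-table entry yields
 * *dma_addrp == 0xc0000000 + 3 * 8K.
 */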
static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, i;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (i = 0; i < npages; i++)
				iopte_make_dummy(iommu, base + i);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
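
/* Merging example: two entries whose allocations came back
 * bus-contiguous (dma_addr == dma_next) collapse into a single
 * scatterlist segment, e.g. two 8K mappings become one 16K DMA segment,
 * provided the combined length stays within dma_get_max_seg_size(dev);
 * otherwise a fresh output segment is started.
 */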
/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
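
/* dma_ops defaults to the sun4u methods above; on hypervisor (sun4v)
 * platforms the PCI probe code is expected to repoint it at the
 * hypervisor-call based implementation instead.
 */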
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);
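
/* Masks wider than 32 bits are rejected outright because the IOMMU
 * hands out 32-bit bus addresses; otherwise the device mask must cover
 * every bit of this IOMMU's dma_addr_mask for translation to be usable.
 */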
int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif

	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);