/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})

#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
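
/* iommu_read()/iommu_write() reach the IOMMU and streaming buffer
 * control registers with physical-address bypass loads and stores
 * (ASI_PHYS_BYPASS_EC_E), so they operate on the raw register physical
 * addresses held in the iommu/strbuf software state.
 */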

/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
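
/* Because of IOPTE_IS_DUMMY(), "freeing" a mapping means rewriting its
 * IOPTE to point at iommu->dummy_page rather than clearing IOPTE_VALID;
 * presumably this keeps any late or stray DMA that hits a stale entry
 * translating to a harmless scratch page instead of faulting.
 */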

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based largely upon the ppc64 iommu allocator. */
static long arena_alloc(struct iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			__iommu_flushall(iommu);
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
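
/* arena_alloc() is a simple next-fit search over the arena bitmap,
 * starting at the rotating hint.  The first time the search runs off
 * the end it wraps to the beginning and calls __iommu_flushall() before
 * retrying; presumably IOTLB/tag invalidation is done lazily, so the
 * hardware must be flushed before previously freed entries are reused.
 */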

static void arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}
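
/* iommu_table_init() only builds the software state: the spinlock, the
 * arena bitmap, the dummy page and a TSB full of dummy IOPTEs.
 * Programming the TSB base and control registers into the hardware is
 * left to the bus controller code; a caller is expected to do roughly
 * (illustrative sketch, not code from this file):
 *
 *	err = iommu_table_init(iommu, tsbsize, dma_offset, dma_mask);
 *	if (err)
 *		return err;
 *	... then point the hardware at __pa(iommu->page_table) ...
 */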

static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
	long entry;

	entry = arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}

static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
	arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
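
/* Context numbers tag groups of mappings so the streaming buffer can be
 * flushed per-context instead of per-page.  Context 0 doubles as "no
 * context": iommu_alloc_ctx() falls back to 0 when the bitmap is full,
 * and iommu_free_ctx() only returns non-zero contexts to the bitmap.
 */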

static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
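
/* The DMA handle returned above is just the IOMMU window base plus the
 * index of the first IOPTE scaled by the IO page size.  For example,
 * with a page_table_map_base of 0xff000000 (illustrative value only)
 * and entry index 3, *dma_addrp would be 0xff000000 + (3 << IO_PAGE_SHIFT).
 */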

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	free_npages(iommu, dvma - iommu->page_table_map_base, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
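
/* Note that dma_4u_map_single() preserves the sub-page offset of the
 * CPU address: the returned handle is the IO-page-aligned bus address
 * of the first IOPTE OR'd with (oaddr & ~IO_PAGE_MASK), so a buffer
 * starting in the middle of a page keeps that offset within the first
 * IO page of the DMA window.
 */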

static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);

			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
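
/* Streaming buffer flush protocol, as implemented above: when both the
 * streaming buffer and the IOMMU support context flushing, a write to
 * the context-flush register retires every line tagged with that
 * context, and the context-match register is polled to confirm;
 * otherwise each IO page is flushed individually through strbuf_pflush.
 * For anything other than DMA_TO_DEVICE a flush-sync is then issued via
 * strbuf_fsync and the in-memory flush flag is polled until the
 * hardware sets it.
 */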

static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	(__pa(sg_virt((SG))))

static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
		    int nused, int nelems,
		    unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg = sg_next(sg);
				nelems--;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg = sg_next(sg);
			nelems--;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (nelems &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg = sg_next(sg);
				nelems--;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg = sg_next(dma_sg);
	}
}
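
/* fill_sg() walks the scatterlist in lock-step with the DMA segments
 * produced by prepare_sg(): for each of the 'nused' DMA segments it
 * emits one IOPTE per IO page, merging physically contiguous
 * scatterlist entries into the same run of pages so a single DMA
 * segment can cover several CPU-side entries.
 */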

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4u_map_single(dev, sg_virt(sglist),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (base == NULL)
		goto bad;

	dma_base = iommu->page_table_map_base +
		((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp = sg_next(sgtmp);
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	return used;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}
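
/* dma_4u_map_sg() returns the number of DMA segments actually used
 * (which may be fewer than 'nelems' once entries have been coalesced),
 * or 0 on error.  The single-entry fast path just defers to
 * dma_4u_map_single().
 */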

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out the TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
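
/* The generic DMA API wrappers (dma_map_single(), dma_map_sg() and
 * friends) dispatch through the exported dma_ops pointer, which is set
 * to the sun4u routines above by default; other platform code can
 * repoint dma_ops at its own implementation.
 */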

int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif

	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);