/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
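/* For illustration, assuming the usual 8 KB Alpha page size (PAGE_SHIFT == 13):
   a page-aligned physical address of 0x02000000 gives

	0x02000000 >> 12 == 0x2000,   0x2000 | 1 == 0x2001

   that is, the page frame number (0x1000) shifted left by one bit with
   the valid bit set in bit 0, which is the window PTE format assumed
   throughout this file.  */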
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
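/* For example (sizes assumed for illustration): with 96 MB of main
   memory and a requested maximum of a 1 GB window, mem < max, so
   roundup_pow_of_two(96 MB) = 128 MB is returned instead of 1 GB.  */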
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
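/* Sizing sketch (numbers assumed for illustration): with 8 KB pages,
   a 1 GB window covers 128K pages, each needing one 8-byte PTE, so

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long))
		 = 1 GB / (8 KB / 8) = 1 MB

   of PTE storage, which is also the minimum alignment enforced above,
   since the TLB concatenates rather than adds the window offset.  */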
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
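/* A typical caller is the per-chipset init code; a minimal sketch
   (the base, size and alignment values here are made up):

	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_pci = iommu_arena_new(hose, 0x40000000, 0x00800000, 0);

   after which the chip-specific code points the hardware window
   registers at arena->dma_base and the physical address of
   arena->ptes.  */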
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
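/* Note the return value is an index into arena->ptes, not a bus
   address; callers turn it into one as pci_map_single_1 does below:

	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	bus_addr = arena->dma_base + dma_ofs * PAGE_SIZE + offset_in_page;

   where offset_in_page stands for the low bits of the CPU address.  */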
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
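/* Driver-side sketch of the exported wrappers that follow (buffer and
   length names are placeholders):

	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (!bus)
		return -ENOMEM;
	... hand `bus' to the device, wait for the transfer ...
	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);

   In this implementation a returned address of 0 means the mapping
   could not be made.  */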
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
		       dma_addr_t *dma_addrp, gfp_t gfp)
{
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(__pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
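/* Worked example (addresses assumed for illustration, 8 KB pages, and
   a device whose max segment size is not exceeded): three 0x2000-byte
   entries at physical 0x10000, 0x12000 and 0x20000 with virt_ok set
   are classified as

	sg[0].dma_address = 1    leader; the merge will need the iommu
	sg[1].dma_address = -1   physically adjacent to sg[0]
	sg[2].dma_address = -2   page-aligned, virtually mergable only

   and sg[0].dma_length = 0x6000, the combined length.  */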
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without a device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;
	struct device *dev;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	dev = pdev ? &pdev->dev : NULL;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
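/* For instance (window layout assumed for illustration): with a direct
   map at __direct_map_base = 1 GB of size 1 GB, a device with a full
   32-bit dma_mask passes the first test, while a 24-bit ISA-style mask
   (0x00ffffff) fails it and is only supported if a scatter-gather
   arena sits entirely below 16 MB.  */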
/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
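/* Expected call sequence from an AGP GART driver, as a sketch (arena,
   pg_count, align_mask and pages are the caller's):

	p = iommu_reserve(arena, pg_count, align_mask);	  reserve a slot
	iommu_bind(arena, p, pg_count, pages);		  install real ptes
	iommu_unbind(arena, p, pg_count);		  back to reserved
	iommu_release(arena, p, pg_count);		  free the slot

   iommu_release() refuses entries that are not still marked
   IOMMU_RESERVED_PTE, so unbind must precede release.  */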
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
/* Helper for generic DMA-mapping functions. */

struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);