/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
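/* For illustration, assuming Alpha's 8 KB pages (PAGE_SHIFT == 13): a
   physical address of 0x42000 (PFN 0x21) becomes (0x42000 >> 12) | 1 = 0x43,
   i.e. the page frame number shifted left by one with bit 0 serving as the
   valid bit. */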
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
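/* Sizing illustration (assuming 8 KB pages and 8-byte ptes): a 64 MB
   window has 8192 entries and needs 64 MB / (8192 / 8) = 64 KB of pte
   storage; the pte table is aligned to at least its own size because
   the TLB lookup concatenates rather than adds, as noted above. */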
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
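/* Illustrative walk-through (assumed numbers): with mask == 7 (allocations
   aligned to 8 entries) and next_entry == 5, the scan starts at
   ALIGN(5, 8) == 8.  A candidate is also rejected by iommu_is_span_boundary()
   when the N entries would straddle the device's segment boundary, in which
   case p advances to the next aligned slot and the count restarts. */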
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
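/* Example of the mask computation above: with arena->align_entry == 1 and
   a caller passing align == 8 (the ISA-bridge case in pci_map_single_1
   below), mask becomes max(8, 1) - 1 == 7, so the pte search only
   considers offsets that are multiples of 8 entries, i.e. 64 KB of DMA
   space with 8 KB pages. */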
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once. */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
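/* A minimal (hypothetical) driver-side sketch of the streaming pair
   exported above; "dev", "buf" and "len" are placeholders:

	dma_addr_t bus = pci_map_single(dev, buf, len, PCI_DMA_TODEVICE);
	if (!bus)
		return -ENOMEM;
	... point the device at "bus" and wait for the transfer ...
	pci_unmap_single(dev, bus, len, PCI_DMA_TODEVICE);
*/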
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */
void *
__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
		       dma_addr_t *dma_addrp, gfp_t gfp)
{
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(__pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */
void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
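/* A minimal (hypothetical) usage sketch for the consistent-mode pair;
   "pdev" and the ring names are placeholders:

	dma_addr_t ring_dma;
	void *ring = pci_alloc_consistent(pdev, PAGE_SIZE, &ring_dma);
	if (!ring)
		return -ENOMEM;
	... hand ring_dma to the device, use "ring" from the CPU ...
	pci_free_consistent(pdev, PAGE_SIZE, ring, ring_dma);
*/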
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
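/* Classification example (assuming 8 KB pages and a large enough
   max_seg_size): three 8 KB entries at physical 0x10000, 0x12000 and
   0x20000 with virt_ok set come out as dma_address 1, -1, -2.  The second
   is physically adjacent to the first (0x10000 + 0x2000 == 0x12000), the
   third is merely page-aligned with respect to the running next_paddr, and
   the leader's dma_length accumulates to 0x6000. */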
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;
	struct device *dev;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	dev = pdev ? &pdev->dev : NULL;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);
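/* A minimal (hypothetical) caller sketch; "pdev", "sglist" and "nents" are
   placeholders.  The routine returns the number of DMA segments actually
   used, which may be smaller than nents after merging:

	int used = pci_map_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
	if (!used)
		return -ENOMEM;
	... program the device from sg_dma_address()/sg_dma_len() of the
	    first "used" entries ...
	pci_unmap_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
*/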
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
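/* Illustration (assumed numbers): on a system whose direct-map window
   covers 1 GB starting at bus address 1 GB, a device limited to the
   24-bit ISA mask (0x00ffffff) fails both direct-map tests above, so
   whether it is supported depends on hose->sg_isa or hose->sg_pci
   fitting entirely below that mask. */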
/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for (i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for (j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for (i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for (i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
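/* Expected (hypothetical) calling sequence from an AGP GART driver, where
   "arena", "physaddrs" and the page counts are placeholders:

	long ofs = iommu_reserve(arena, pg_count, align_mask);
	if (ofs < 0)
		return -ENOMEM;
	iommu_bind(arena, ofs, pg_count, physaddrs);
	... device performs DMA through the bound range ...
	iommu_unbind(arena, ofs, pg_count);
	iommu_release(arena, ofs, pg_count);
*/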
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
/* Helper for generic DMA-mapping functions. */
struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
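/* Typical (hypothetical) use from a driver probe routine; "dev" is a
   placeholder and a 32-bit mask is just an example width:

	int err = dma_set_mask(dev, 0xffffffffULL);
	if (err)
		return err;
*/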