/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif
#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
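/* Illustrative arithmetic, assuming Alpha's 8KB pages (PAGE_SHIFT == 13):
   mk_iommu_pte(0x2000) == (0x2000 >> 12) | 1 == 0x3, i.e. the page frame
   number shifted left by one, with bit 0 serving as the valid bit.  */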
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
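	/* One unsigned long pte maps one IOMMU page, so the table costs
	   sizeof(unsigned long) bytes per PAGE_SIZE of window.  Worked
	   example, assuming 8KB pages: a 1GB window needs
	   1GB / (8192 / 8) = 1MB of ptes.  */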
	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;
#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */
	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success.  It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
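/* Note that MASK above is an alignment mask in units of ptes, not a dma
   mask: e.g. mask == 7 starts the run on an 8-entry boundary, which with
   8KB pages corresponds to 64KB of bus address space.  */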
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, i.e. not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
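/* Note that freeing only clears the ptes; it does not flush the IOMMU
   TLB.  Callers invoke alpha_mv.mv_pci_tbi themselves when a stale
   translation could otherwise be reused (see the unmap paths below).  */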
/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
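/* DAC here is PCI Dual Address Cycle, i.e. 64-bit bus addressing: when
   the machine vector provides pci_dac_offset, that offset is simply
   added to the physical address so a capable device can reach all of
   memory without consuming iommu ptes.  */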
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
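/* Illustrative arithmetic for the sg path above, with assumed numbers:
   an arena at dma_base 0x800000, 8KB pages, paddr 0x10100 and
   dma_ofs == 2 yield a bus address of 0x800000 + 2*8192 + 0x100
   == 0x804100.  */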
/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (dir == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
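/* Driver-side usage sketch (illustrative, not part of this file): these
   two callbacks back the generic consistent-DMA API, e.g.

	void *buf = dma_alloc_coherent(&pdev->dev, size, &bus_addr,
				       GFP_KERNEL);
	if (buf) {
		... program the device with bus_addr ...
		dma_free_coherent(&pdev->dev, size, buf, bus_addr);
	}
 */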
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
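/* Illustrative classification, assuming 8KB pages and a device whose
   max segment size permits merging: entries with paddr/length
   {0x4000/0x2000, 0x6000/0x2000, 0xa000/0x2000} become
   {leader, -1 (physically adjacent), -2 (page-aligned, virtually
   mergeable when virt_ok)}, leaving the leader's dma_length 0x6000.  */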
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (dir == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (i.e. not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
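/* Usage sketch for the GART extensions (illustrative, not from this
   file): an AGP driver reserves a mask-aligned range once, then binds
   and unbinds real pages into it, releasing the range last:

	long ofs = iommu_reserve(arena, npages, align_mask);
	if (ofs >= 0) {
		iommu_bind(arena, ofs, npages, pages);
		... device uses arena->dma_base + ofs * PAGE_SIZE ...
		iommu_unbind(arena, ofs, npages);
		iommu_release(arena, ofs, npages);
	}
 */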
static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
static int alpha_pci_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
struct dma_map_ops alpha_pci_ops = {
	.alloc_coherent		= alpha_pci_alloc_coherent,
	.free_coherent		= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.mapping_error		= alpha_pci_mapping_error,
	.dma_supported		= alpha_pci_supported,
	.set_dma_mask		= alpha_pci_set_mask,
};

struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);