/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
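/*
 * Note that iommu_arena_free only clears the ptes; it does not touch the
 * IOMMU TLB.  Callers such as alpha_pci_unmap_page and alpha_pci_unmap_sg
 * issue the mv_pci_tbi flush themselves when freed entries might still be
 * cached (see the `next_entry' checks below).
 */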
/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
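/*
 * Summary of the strategy above: pci_map_single_1 tries, in order, the
 * direct-map window, DAC addressing (when the caller allowed it), and
 * finally a scatter-gather arena allocation.  A return value of 0 means
 * every applicable method failed and is reported as a mapping error by
 * alpha_pci_mapping_error below.
 */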
/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
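/*
 * A NULL return here means "plain ISA bus master": the callers in this
 * file then fall back to pci_isa_hose and the 24-bit ISA_DMA_MASK when
 * they see a NULL pci_dev.
 */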
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (dir == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* We will not merge sg entries without a device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
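/*
 * Illustrative example (hypothetical addresses): in a three-entry list
 * whose second entry starts exactly where the first ends, and whose third
 * starts on a page boundary with the second also ending on one, the loop
 * above leaves dma_address = -1 on entry 1 and -2 on entry 2, and the
 * leader (entry 0) gets dma_address = 1 with dma_length covering all
 * three lengths -- provided virt_ok is set and the device's maximum
 * segment size is not exceeded.
 */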
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}
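/*
 * Note on the return value: alpha_pci_map_sg returns the number of
 * coalesced DMA segments (out - start), or 0 if an allocation failed.
 * When fewer segments than nents are produced, the entry after the last
 * one gets dma_length = 0 so that alpha_pci_unmap_sg knows where to stop.
 */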
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (dir == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
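/*
 * Expected call sequence for the AGP GART hooks above (a sketch based on
 * the comments in iommu_reserve and iommu_bind): the GART driver first
 * calls iommu_reserve() to claim a run of ptes, then iommu_bind() to
 * point the reserved ptes at real pages, iommu_unbind() to detach the
 * pages again, and finally iommu_release() to return the ptes to the
 * arena.
 */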
static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
static int alpha_pci_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
struct dma_map_ops alpha_pci_ops = {
	.alloc_coherent		= alpha_pci_alloc_coherent,
	.free_coherent		= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.mapping_error		= alpha_pci_mapping_error,
	.dma_supported		= alpha_pci_supported,
	.set_dma_mask		= alpha_pci_set_mask,
};

struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);