/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
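
/*
 * DMA mapping support for Alpha PCI.  A device address is produced in
 * one of three ways: through the direct-map window, through a DAC
 * (64-bit) offset when the device and chipset support it, or through a
 * scatter-gather "arena" whose page table the IOMMU walks.
 */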

#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
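
/*
 * An IOMMU page table entry holds the physical page frame number
 * shifted left by one, with bit 0 serving as the valid bit; for a
 * page-aligned paddr, shifting right by (PAGE_SHIFT - 1) produces
 * exactly that layout.
 */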

static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
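
/*
 * A pci_iommu_arena describes one scatter-gather DMA window: the bus
 * address it starts at (dma_base), its size, a linear array of ptes
 * (one per IOMMU page in the window), a lock, and a next_entry rotor
 * used as the starting point for allocations.
 */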

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
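
/*
 * Note that iommu_arena_free only clears the ptes; the IOMMU TLB may
 * still hold stale copies.  Callers flush the TLB lazily, either when
 * the allocation rotor wraps or when freed entries lie at or above
 * next_entry (see the unmap paths below).
 */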

/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %pf\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
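
/*
 * Streaming mappings try, in order: the direct-map window (the bus
 * address is just paddr + __direct_map_base), a DAC address above 4GB
 * when the device's dma_mask allows it, and finally a page mapped
 * through one of the hose's scatter-gather arenas.
 */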

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (dir == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %pf\n",
			__builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
	      dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
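
/*
 * For instance, with virt_ok set (and a device whose maximum segment
 * size allows it), three page-aligned, page-sized entries where the
 * second is physically contiguous with the first but the third is not
 * are classified as { 1, -1, -2 }: the leader is virtually mergeable
 * with all of its followers and its dma_length covers all three pages.
 */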

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
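
/*
 * alpha_pci_map_sg below returns the number of coalesced entries it
 * actually mapped; 0 indicates failure.  A zero dma_length entry marks
 * the end of the shortened list for alpha_pci_unmap_sg.
 */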

static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (dir == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}

/*
 * AGP GART extensions to the IOMMU
 */
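
/*
 * The GART code first reserves a run of ptes (iommu_reserve), later
 * binds real pages into those slots (iommu_bind), and undoes the two
 * steps with iommu_unbind and iommu_release.  Reserved-but-unbound
 * slots hold IOMMU_RESERVED_PTE so the allocator will not reuse them.
 */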

int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}

static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

static int alpha_pci_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}

struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.mapping_error		= alpha_pci_mapping_error,
	.dma_supported		= alpha_pci_supported,
	.set_dma_mask		= alpha_pci_set_mask,
};

struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);