// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0

#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
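
/* Bit 0 is the pte valid bit; the page frame number occupies the bits
   above it, so the expression above is equivalent to
   ((paddr >> PAGE_SHIFT) << 1) | 1.  With Alpha's 8KB pages
   (PAGE_SHIFT == 13), paddr 0x2000000 yields pte 0x2001. */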

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
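
/* Worked example: with 384MB of low memory and a requested max of 1GB,
   mem (384MB) is below max, so the result is clamped to
   roundup_pow_of_two(384MB) == 512MB rather than the full 1GB. */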

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;
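
	/* Sizing example: an 8MB window with 8KB pages needs
	   8MB/8KB == 1024 ptes of 8 bytes each, so mem_size is 8KB
	   and the pte table is aligned to at least 8KB. */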

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success.  It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
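
/* To summarize the loop above: this is a first-fit search that starts
   at the next_entry cursor, skips ranges that would cross the device's
   DMA segment boundary, and makes at most one wrap-around pass (after
   flushing the IOMMU TLB) before giving up. */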

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, i.e. not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
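
/* Note that this only clears the ptes; flushing the IOMMU TLB is
   deferred to the callers, which batch the flush (see the next_entry
   checks in alpha_pci_unmap_page and alpha_pci_unmap_sg below). */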

/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %pf\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev_is_pci(dev))
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == PCI_DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %pf\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu.  Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
	      dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
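
/* Worked example of the classification: for three 8KB entries where B
   begins at A's physical end and C begins elsewhere, A is written with
   dma_address 0 (leader) and dma_length 16KB, B with -1 (physically
   adjacent follower), and C becomes a new leader.  Had B been
   page-aligned but physically discontiguous, with virt_ok set it would
   get -2 and A's leader flag would become 1. */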

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg = sg_next(sg);
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	BUG_ON(dir == PCI_DMA_NONE);

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}

/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (i.e. not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
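
/* Typical GART usage of the four helpers above: iommu_reserve() stakes
   out a pte range (IOMMU_RESERVED_PTE), iommu_bind() points the
   reserved ptes at real pages, iommu_unbind() returns them to the
   reserved state, and iommu_release() frees the range for reuse. */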

static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

const struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.mapping_error		= alpha_pci_mapping_error,
	.dma_supported		= alpha_pci_supported,
};

const struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);
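
/* Usage sketch (hypothetical driver code, not part of this file): with
   dma_ops set to alpha_pci_ops, a generic mapping call such as

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

   is dispatched to alpha_pci_map_page() and alpha_pci_mapping_error()
   above. */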