// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
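
/* An IOMMU PTE stores the target page frame number shifted up by one
   bit, with bit 0 serving as the valid bit; mk_iommu_pte() below
   builds such an entry from a physical address.  */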
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = memblock_alloc_node(sizeof(*arena), align, nid);
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
		if (!arena)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*arena));
	}

	arena->ptes = memblock_alloc_node(mem_size, align, nid);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = memblock_alloc(mem_size, align);
		if (!arena->ptes)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, mem_size, align);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
	if (!arena)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*arena));
	arena->ptes = memblock_alloc(mem_size, align);
	if (!arena->ptes)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, mem_size, align);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
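
/* Convenience wrapper for the common case: allocate the arena on
   node 0.  */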
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
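
/* Allocate N contiguous IOMMU pages from ARENA with the requested
   alignment and mark them nonzero-but-invalid, so that a concurrent
   TLB reload cannot pick them up before the caller installs real
   translations.  Returns the starting pte index, or -1 on failure.  */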
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
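
/* Return N ptes starting at OFS to the free pool by clearing them.
   Callers are responsible for any TLB flush needed before the
   entries can safely be reused.  */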
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %pf\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return DMA_MAPPING_ERROR;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return DMA_MAPPING_ERROR;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev_is_pci(dev))
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
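
/* dma_map_ops entry point: map one page for streaming DMA by
   resolving the generic device to a PCI device (or the ISA bridge)
   and deferring to pci_map_single_1 above.  */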
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == PCI_DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %pf\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);

	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
	      dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg entries without a device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg = sg_next(sg);
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
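
/* dma_map_ops entry point: map a whole scatterlist, merging entries
   that sg_classify found to be physically or virtually adjacent, and
   using only direct-map or DAC mappings when no scatter-gather arena
   is available.  */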
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != DMA_MAPPING_ERROR;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	BUG_ON(dir == PCI_DMA_NONE);

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}

/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
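
/* Release a range previously set aside with iommu_reserve().  Fails
   with -EBUSY if any pte in the range is no longer marked
   IOMMU_RESERVED_PTE.  */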
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
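
/* Point a reserved range of ptes at the given pages.  The range must
   still be in the IOMMU_RESERVED_PTE state, i.e. reserved but not yet
   bound.  */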
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
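
/* Detach the pages from a bound range, returning the ptes to the
   reserved state so the range can be re-bound or released.  */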
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
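
/* DMA operations table used for PCI (and, via the ISA bridge, ISA and
   EISA) devices on Alpha.  */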
const struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.dma_supported		= alpha_pci_supported,
};
EXPORT_SYMBOL(alpha_pci_ops);