// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
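/* A window PTE as built by mk_iommu_pte() below carries the page frame
   number in bits 1 and up (for a page-aligned physical address), with
   the valid bit in bit 0. */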
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
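	/* One PTE (an unsigned long) is needed per page of the DMA window,
	   so the table holds window_size / PAGE_SIZE entries, which is what
	   the division by (PAGE_SIZE / sizeof(unsigned long)) yields in
	   bytes. */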
	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;
#ifdef CONFIG_DISCONTIGMEM

	arena = memblock_alloc_node(sizeof(*arena), align, nid);
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
		if (!arena)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*arena));
	}

	arena->ptes = memblock_alloc_node(mem_size, align, nid);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = memblock_alloc(mem_size, align);
		if (!arena->ptes)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, mem_size, align);
	}
#else /* CONFIG_DISCONTIGMEM */

	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
	if (!arena)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*arena));
	arena->ptes = memblock_alloc(mem_size, align);
	if (!arena->ptes)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, mem_size, align);
#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);
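	/* boundary_size is the device's DMA segment boundary expressed in
	   pages; iommu_is_span_boundary() below rejects any candidate run
	   of ptes that would cross it. */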
	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
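	/* The search begins at next_entry (round-robin through the arena),
	   rounded up to the requested alignment; on failure the code below
	   flushes the IOMMU TLB and retries once from entry 0. */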
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}
	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
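	/* align and arena->align_entry are assumed to be powers of two, so
	   the resulting mask has the 2^k - 1 form that the ALIGN() math in
	   iommu_arena_find_pages() relies on. */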
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %ps\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;
	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif
	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return DMA_MAPPING_ERROR;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
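	/* With the 8 KB pages used on alpha, eight arena entries span 64 KB,
	   hence the alignment of 8 applied below. */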
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return DMA_MAPPING_ERROR;
	}
	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
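	/* The ptes above map whole pages; the sub-page offset of cpu_addr is
	   added back into the returned bus address below. */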
	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev_is_pci(dev))
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == PCI_DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}
	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}
	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
	}
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %ps\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);
	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);

	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
	      dma_addr, size, __builtin_return_address(0));
}
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
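/* Note: SG_ENT_PHYS_ADDRESS relies on sg_virt() giving a kernel-direct
   mapped address, which always holds here since alpha has no highmem. */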
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif
	/* If physically contiguous and DAC is available, use it. */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}
	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
		struct scatterlist *last_sg = sg;

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg = sg_next(sg);
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != DMA_MAPPING_ERROR;
	}
	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}
	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg. */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;
 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;
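	/* fbeg/fend accumulate the lowest and highest bus addresses actually
	   freed below, so a single TLB flush can cover the whole range
	   afterwards. */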
	BUG_ON(dir == PCI_DMA_NONE);

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);
	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}
	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
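/* The iommu_reserve/release/bind/unbind helpers above serve the Alpha AGP
   GART support, which backs AGP apertures with the same arenas used for
   PCI DMA. */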
const struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.dma_supported		= alpha_pci_supported,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};
EXPORT_SYMBOL(alpha_pci_ops);