// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This can be sized dynamically, but we will do this
 * only when we have a guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
static int viking_flush;
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
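/*
 * MKIOPTE() builds an I/O page table entry: the page frame number is shifted
 * into the IOPTE_PAGE field, the requested permission bits are ORed in, and
 * the IOPTE_WAZ ("write as zero") bits are masked off so the reserved bits
 * of the entry stay clear.
 */
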
static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES * sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES >> 3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES >> 3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES * sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte * sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len)
{
	void *vaddr = page_address(page) + offset;
	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;
	return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
}

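/*
 * Two front ends wrap __sbus_iommu_map_page().  On CPUs whose
 * flush_page_for_dma() flushes the whole cache regardless of its argument
 * (flush_page_for_dma_global), a single flush_page_for_dma(0) is enough;
 * otherwise every page touched by the buffer is flushed individually before
 * the mapping is installed.
 */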
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	void *vaddr = page_address(page) + offset;
	unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;

	while (p < (unsigned long)vaddr + len) {
		flush_page_for_dma(p);
		p += PAGE_SIZE;
	}

	return __sbus_iommu_map_page(dev, page, offset, len);
}

static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i, n;

	flush_page_for_dma(0);

	for_each_sg(sgl, sg, nents, i) {
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
	}

	return nents;
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page, oldpage = 0;
	struct scatterlist *sg;
	int i, j, n;

	for_each_sg(sgl, sg, nents, j) {
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long)page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
	}

	return nents;
}

static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long off = dma_addr & ~PAGE_MASK;
	int npages;

	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
}

static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i, n;

	for_each_sg(sgl, sg, nents, i) {
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
		sg->dma_address = 0x21212121;	/* poison value to catch use after unmap */
	}
}

static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
			addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

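/*
 * sbus_iommu_free() undoes sbus_iommu_alloc(): it releases the DVMA resource,
 * clears the ioptes that backed the buffer, invalidates the IOMMU, returns
 * the bitmap range, and frees the underlying pages.
 */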
static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
		dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
	__free_pages(page, get_order(len));
}

static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
	.alloc		= sbus_iommu_alloc,
	.free		= sbus_iommu_free,
	.map_page	= sbus_iommu_map_page_gflush,
	.unmap_page	= sbus_iommu_unmap_page,
	.map_sg		= sbus_iommu_map_sg_gflush,
	.unmap_sg	= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
	.alloc		= sbus_iommu_alloc,
	.free		= sbus_iommu_free,
	.map_page	= sbus_iommu_map_page_pflush,
	.unmap_page	= sbus_iommu_unmap_page,
	.map_sg		= sbus_iommu_map_sg_pflush,
	.unmap_sg	= sbus_iommu_unmap_sg,
};

void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is */
		dma_ops = &sbus_iommu_dma_gflush_ops;
	} else {
		dma_ops = &sbus_iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}