/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>
#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4
/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)
/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
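
/*
 * Worked example (illustrative note, derived from the macros above): with
 * a 4K granule and a 48-bit IAS, pg_shift = 12 and bits_per_level = 9, so
 * levels = 4 and the walk starts at level 0. The macros then evaluate to:
 *
 *   ARM_LPAE_LVL_SHIFT(l):  l=0 -> 39, l=1 -> 30, l=2 -> 21, l=3 -> 12
 *   ARM_LPAE_LVL_IDX(a,l):  a 9-bit index taken at those bit positions
 *   ARM_LPAE_BLOCK_SIZE(l): l=0 -> 512G, l=1 -> 1G, l=2 -> 2M, l=3 -> 4K
 */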
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3
#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)
/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL
#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
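
/*
 * Illustrative note (not in the original source): with the three indices
 * above, the MAIR computed in arm_64_lpae_alloc_pgtable_s1() packs one
 * attribute per byte: 0x44 (NC) in byte 0, 0xff (WBRWA) in byte 1 and
 * 0x04 (Device) in byte 2, i.e. a MAIR[0] value of 0x0004ff44.
 */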
/* IOPTE accessors */
#define iopte_deref(pte,d)	__va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}
static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
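
/*
 * Worked example (illustrative note): with a 64K granule, a 52-bit output
 * address such as 0x0009876543210000 has bits [51:48] folded down into PTE
 * bits [15:12] by paddr_to_iopte(), giving 0x0000876543219000; the rotation
 * in iopte_to_paddr() then restores the original physical address. For
 * granules smaller than 16 bits of page offset the address field is used
 * as-is, so the fold-down is effectively harmless there.
 */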
static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		__arm_lpae_sync_pte(ptep, cfg);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);
static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
	    (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
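
/*
 * Illustrative scenario (added note, not in the original): if two CPUs race
 * to install a next-level table for the same empty PTE, both call this
 * helper with curr == 0. Only one cmpxchg succeeds; the loser sees a
 * non-zero "old" value, and __arm_lpae_map() then frees its spare table and
 * walks through the winner's entry instead. The SW_SYNC software bit
 * records that the winning PTE has already been made visible to the device,
 * so later walkers can skip the extra cache maintenance.
 */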
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
		   !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
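
/*
 * Illustrative example (added note): for a stage-1 format, a mapping
 * requested with IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE ends up with
 * pte = nG | AP_UNPRIV | (MAIR_ATTR_IDX_CACHE << ATTRINDX_SHIFT); the
 * access flag, shareability, descriptor type and output address are
 * added later by __arm_lpae_init_pte().
 */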
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
		io_pgtable_tlb_sync(&data->iop);
		return size;
	}

	return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}
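
/*
 * Illustrative example (added note): with a 4K granule, unmapping 4K out of
 * an existing 2M block enters this function at the level below the block.
 * It allocates a 512-entry table, fills every slot except the one being
 * unmapped with 4K entries that inherit the block's attributes, then
 * atomically swaps the table in over the old block entry before flushing
 * the TLB for the removed page.
 */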
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
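
/*
 * Illustrative walk (added note): with a 4K granule and the walk starting
 * at level 0, unmapping SZ_2M recurses from level 0 down to level 2, where
 * ARM_LPAE_BLOCK_SIZE(2) == SZ_2M. The level-2 entry is cleared and, if it
 * pointed to a table of 4K pages rather than a 2M block, the partial walk
 * is flushed and the sub-table freed.
 */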
static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
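
/*
 * Illustrative example (added note): if the caller advertises
 * pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G on a kernel with a 4K PAGE_SIZE,
 * the granule resolves to 4K, the bitmap is left as 4K | 2M | 1G, and both
 * ias and oas are capped at 48 bits; only the 64K granule raises the cap
 * to 52 bits.
 */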
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
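
/*
 * Illustrative geometry (added note): for a 64K granule and a 48-bit IAS,
 * pg_shift = 16 and bits_per_level = 13, so va_bits = 32 and
 * levels = DIV_ROUND_UP(32, 13) = 3. The top level then resolves
 * pgd_bits = 32 - 26 = 6 bits, i.e. a 512-byte pgd of 64 entries.
 */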
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	      (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
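
/*
 * Illustrative example (added note): for a 4K granule and a 40-bit IAS,
 * arm_lpae_alloc_pgtable() initially picks 4 levels with a 16-byte pgd.
 * That pgd is only 2 entries, so the block above concatenates them into a
 * single 8K level-1 table and drops to 3 levels; the start level becomes 1
 * and, after the 4K-granule adjustment of sl, the SL0 field ends up as 1.
 */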
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif