// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
// Copyright (C) 2019-2020, Cerno

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "iommu-pages.h"

#define IOMMU_RESET_REG			0x010
#define IOMMU_RESET_RELEASE_ALL			0xffffffff
#define IOMMU_ENABLE_REG		0x020
#define IOMMU_ENABLE_ENABLE			BIT(0)

#define IOMMU_BYPASS_REG		0x030
#define IOMMU_AUTO_GATING_REG		0x040
#define IOMMU_AUTO_GATING_ENABLE		BIT(0)

#define IOMMU_WBUF_CTRL_REG		0x044
#define IOMMU_OOO_CTRL_REG		0x048
#define IOMMU_4KB_BDY_PRT_CTRL_REG	0x04c
#define IOMMU_TTB_REG			0x050
#define IOMMU_TLB_ENABLE_REG		0x060
#define IOMMU_TLB_PREFETCH_REG		0x070
#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m)	BIT(m)

#define IOMMU_TLB_FLUSH_REG		0x080
#define IOMMU_TLB_FLUSH_PTW_CACHE		BIT(17)
#define IOMMU_TLB_FLUSH_MACRO_TLB		BIT(16)
#define IOMMU_TLB_FLUSH_MICRO_TLB(i)		(BIT(i) & GENMASK(5, 0))

#define IOMMU_TLB_IVLD_ADDR_REG		0x090
#define IOMMU_TLB_IVLD_ADDR_MASK_REG	0x094
#define IOMMU_TLB_IVLD_ENABLE_REG	0x098
#define IOMMU_TLB_IVLD_ENABLE_ENABLE		BIT(0)

#define IOMMU_PC_IVLD_ADDR_REG		0x0a0
#define IOMMU_PC_IVLD_ENABLE_REG	0x0a8
#define IOMMU_PC_IVLD_ENABLE_ENABLE		BIT(0)

#define IOMMU_DM_AUT_CTRL_REG(d)	(0x0b0 + ((d) / 2) * 4)
#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m)	(1 << ((((d) & 1) * 16) + ((m) * 2)))
#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m)	(1 << ((((d) & 1) * 16) + ((m) * 2) + 1))

#define IOMMU_DM_AUT_OVWT_REG		0x0d0
#define IOMMU_INT_ENABLE_REG		0x100
#define IOMMU_INT_CLR_REG		0x104
#define IOMMU_INT_STA_REG		0x108
#define IOMMU_INT_ERR_ADDR_REG(i)	(0x110 + (i) * 4)
#define IOMMU_INT_ERR_ADDR_L1_REG	0x130
#define IOMMU_INT_ERR_ADDR_L2_REG	0x134
#define IOMMU_INT_ERR_DATA_REG(i)	(0x150 + (i) * 4)
#define IOMMU_L1PG_INT_REG		0x0180
#define IOMMU_L2PG_INT_REG		0x0184

#define IOMMU_INT_INVALID_L2PG			BIT(17)
#define IOMMU_INT_INVALID_L1PG			BIT(16)
#define IOMMU_INT_MASTER_PERMISSION(m)		BIT(m)
#define IOMMU_INT_MASTER_MASK			(IOMMU_INT_MASTER_PERMISSION(0) | \
						 IOMMU_INT_MASTER_PERMISSION(1) | \
						 IOMMU_INT_MASTER_PERMISSION(2) | \
						 IOMMU_INT_MASTER_PERMISSION(3) | \
						 IOMMU_INT_MASTER_PERMISSION(4) | \
						 IOMMU_INT_MASTER_PERMISSION(5))
#define IOMMU_INT_MASK				(IOMMU_INT_INVALID_L1PG | \
						 IOMMU_INT_INVALID_L2PG | \
						 IOMMU_INT_MASTER_MASK)

#define PT_ENTRY_SIZE			sizeof(u32)

#define NUM_DT_ENTRIES			4096
#define DT_SIZE				(NUM_DT_ENTRIES * PT_ENTRY_SIZE)

#define NUM_PT_ENTRIES			256
#define PT_SIZE				(NUM_PT_ENTRIES * PT_ENTRY_SIZE)

#define SPAGE_SIZE			4096

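/*
 * Illustrative arithmetic, derived from the constants above: each PT
 * maps NUM_PT_ENTRIES * SPAGE_SIZE = 256 * 4kB = 1MB of IOVA space,
 * and the DT holds NUM_DT_ENTRIES = 4096 of them, covering the whole
 * 4096 * 1MB = 4GB 32-bit IOVA range.
 */
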
struct sun50i_iommu {
	struct iommu_device iommu;

	/* Lock to modify the IOMMU registers */
	spinlock_t iommu_lock;

	struct device *dev;
	void __iomem *base;
	struct reset_control *reset;
	struct clk *clk;

	struct iommu_domain *domain;
	struct kmem_cache *pt_pool;
};

struct sun50i_iommu_domain {
	struct iommu_domain domain;

	/* Number of devices attached to the domain */
	refcount_t refcnt;

	/* L1 Page Table */
	u32 *dt;
	dma_addr_t dt_dma;

	struct sun50i_iommu *iommu;
};

static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct sun50i_iommu_domain, domain);
}

static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
	return readl(iommu->base + offset);
}

static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
	writel(value, iommu->base + offset);
}

/*
 * The Allwinner H6 IOMMU uses a 2-level page table.
 *
 * The first level is the usual Directory Table (DT), that consists of
 * 4096 4-byte Directory Table Entries (DTE), each pointing to a Page
 * Table (PT).
 *
 * Each PT consists of 256 4-byte Page Table Entries (PTE), each
 * pointing to a 4kB page of physical memory.
 *
 * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
 * register that contains its physical address.
 */

#define SUN50I_IOVA_DTE_MASK	GENMASK(31, 20)
#define SUN50I_IOVA_PTE_MASK	GENMASK(19, 12)
#define SUN50I_IOVA_PAGE_MASK	GENMASK(11, 0)

static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
}

static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
}

static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
}

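/*
 * Worked example (illustrative, not part of the original source): the
 * IOVA 0x12345678 splits into DTE index 0x123 (bits 31:20), PTE index
 * 0x45 (bits 19:12) and page offset 0x678 (bits 11:0).
 */
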
/*
 * Each Directory Table Entry has a Page Table address and a valid
 * bit:
 *
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:10 - Page Table address
 *   9:2  - Reserved
 *   1:0  - 1 if the entry is valid
 */

#define SUN50I_DTE_PT_ADDRESS_MASK	GENMASK(31, 10)
#define SUN50I_DTE_PT_ATTRS		GENMASK(1, 0)
#define SUN50I_DTE_PT_VALID		1

static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
{
	return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
}

static bool sun50i_dte_is_pt_valid(u32 dte)
{
	return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
}

static u32 sun50i_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
}

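/*
 * Worked example (illustrative): a PT at the PT_SIZE-aligned physical
 * address 0x40001400 yields the DTE
 * (0x40001400 & GENMASK(31, 10)) | SUN50I_DTE_PT_VALID = 0x40001401.
 */
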
/*
 * Each PTE has a Page address, an authority index and a valid bit:
 *
 * +----------------+-----+-----+-----+---+-----+
 * | Page address   | Rsv | ACI | Rsv | V | Rsv |
 * +----------------+-----+-----+-----+---+-----+
 *  31:12 - Page address
 *  11:8  - Reserved
 *   7:4  - Authority Control Index
 *   3:2  - Reserved
 *     1  - 1 if the entry is valid
 *     0  - Reserved
 *
 * The way permissions work is that the IOMMU has 16 "domains" that
 * can be configured to give each master either read or write
 * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0
 * seems to be the default domain, and its permissions in the
 * IOMMU_DM_AUT_CTRL_REG are only read-only, so it's not really
 * useful to enforce any particular permission.
 *
 * Each page entry will then have a reference to the domain it is
 * assigned to, so that we can actually enforce permissions on a
 * per-page basis.
 *
 * In order to make it work with the IOMMU framework, we will be using
 * 4 different domains, starting at 1: RD_WR, RD, WR and NONE,
 * depending on the permission we want to enforce. Each domain will
 * have each master set up in the same way, since the IOMMU framework
 * doesn't seem to restrict page access on a per-device basis. And
 * then we will use the relevant domain index when generating the page
 * table entry depending on the permissions we want to be enforced.
 */

245 enum sun50i_iommu_aci
{
246 SUN50I_IOMMU_ACI_DO_NOT_USE
= 0,
247 SUN50I_IOMMU_ACI_NONE
,
250 SUN50I_IOMMU_ACI_RD_WR
,
#define SUN50I_PTE_PAGE_ADDRESS_MASK	GENMASK(31, 12)
#define SUN50I_PTE_ACI_MASK		GENMASK(7, 4)
#define SUN50I_PTE_PAGE_VALID		BIT(1)

static phys_addr_t sun50i_pte_get_page_address(u32 pte)
{
	return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
}

static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
{
	return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
}

static bool sun50i_pte_is_page_valid(u32 pte)
{
	return pte & SUN50I_PTE_PAGE_VALID;
}

static u32 sun50i_mk_pte(phys_addr_t page, int prot)
{
	enum sun50i_iommu_aci aci;
	u32 flags = 0;

	if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
		aci = SUN50I_IOMMU_ACI_RD_WR;
	else if (prot & IOMMU_READ)
		aci = SUN50I_IOMMU_ACI_RD;
	else if (prot & IOMMU_WRITE)
		aci = SUN50I_IOMMU_ACI_WR;
	else
		aci = SUN50I_IOMMU_ACI_NONE;

	flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
	page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
	return page | flags | SUN50I_PTE_PAGE_VALID;
}

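/*
 * Worked example (illustrative): mapping page 0x40002000 with
 * IOMMU_READ only selects SUN50I_IOMMU_ACI_RD (2), so the PTE is
 * 0x40002000 | FIELD_PREP(SUN50I_PTE_ACI_MASK, 2) |
 * SUN50I_PTE_PAGE_VALID = 0x40002000 | 0x20 | 0x2 = 0x40002022.
 */
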
static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
			       void *vaddr, unsigned int count)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	dma_addr_t dma = virt_to_phys(vaddr);
	size_t size = count * PT_ENTRY_SIZE;

	dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}

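/*
 * Note (added for clarity): the page tables are mapped DMA_TO_DEVICE,
 * with the CPU as the only writer and the hardware page table walker
 * as the only consumer, so every CPU update of a DTE or PTE has to be
 * followed by this sync to make it visible to the device on
 * non-coherent systems.
 */
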
static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
				  unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
	iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
		    IOMMU_TLB_IVLD_ENABLE_ENABLE);

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
					reg, !(reg & IOMMU_TLB_IVLD_ENABLE_ENABLE),
					1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB invalidation timed out!\n");
}

static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
				       unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
		    IOMMU_PC_IVLD_ENABLE_ENABLE);

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
					reg, !(reg & IOMMU_PC_IVLD_ENABLE_ENABLE),
					1, 2000);
	if (ret)
		dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
}

static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
				   unsigned long iova, size_t size)
{
	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);

	sun50i_iommu_zap_iova(iommu, iova);
	sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
	if (size > SPAGE_SIZE) {
		sun50i_iommu_zap_iova(iommu, iova + size);
		sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
	}
	sun50i_iommu_zap_ptw_cache(iommu, iova);
	sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
	if (size > SZ_1M) {
		sun50i_iommu_zap_ptw_cache(iommu, iova + size);
		sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
}

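/*
 * Note (interpretation, not from the original source): the zaps above
 * deliberately overshoot the range, invalidating one page beyond each
 * end for the TLBs and one megabyte beyond for the page table walker
 * cache, which appears to operate at the 1MB granularity of a single
 * PT. This makes sure no stale entry straddling a range boundary
 * survives a partial invalidation.
 */
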
static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
	u32 reg;
	int ret;

	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu, IOMMU_TLB_FLUSH_REG,
		    IOMMU_TLB_FLUSH_PTW_CACHE |
		    IOMMU_TLB_FLUSH_MACRO_TLB |
		    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(0));

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB Flush timed out!\n");

	return ret;
}

static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	/*
	 * At boot, we'll have a first call into .flush_iotlb_all right after
	 * .probe_device, and since we link our (single) domain to our iommu in
	 * the .attach_device callback, we don't have that pointer set.
	 *
	 * It shouldn't really be any trouble to ignore it though since we flush
	 * all caches as part of the device powerup.
	 */
	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_flush_all_tlb(iommu);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}

static int sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
				       unsigned long iova, size_t size)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_zap_range(iommu, iova, size);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;
}

static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *gather)
{
	sun50i_iommu_flush_iotlb_all(domain);
}

static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
	struct sun50i_iommu_domain *sun50i_domain;
	unsigned long flags;
	int ret;

	sun50i_domain = to_sun50i_domain(iommu->domain);

	ret = reset_control_deassert(iommu->reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(iommu->clk);
	if (ret)
		goto err_reset_assert;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
	iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
	iommu_write(iommu, IOMMU_BYPASS_REG, 0);
	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

	ret = sun50i_iommu_flush_all_tlb(iommu);
	if (ret) {
		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
		goto err_clk_disable;
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
	iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;

err_clk_disable:
	clk_disable_unprepare(iommu->clk);

err_reset_assert:
	reset_control_assert(iommu->reset);

	return ret;
}

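/*
 * Summary of the ACI programming above (added for clarity): the NONE
 * domain revokes both reads and writes for all six masters, the RD
 * domain revokes writes only, and the WR domain revokes reads only.
 * The RD_WR domain is left untouched, so nothing is revoked there.
 */
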
static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_ENABLE_REG, 0);
	iommu_write(iommu, IOMMU_TTB_REG, 0);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	clk_disable_unprepare(iommu->clk);
	reset_control_assert(iommu->reset);
}

static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
					   gfp_t gfp)
{
	dma_addr_t pt_dma;
	u32 *page_table;

	page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, pt_dma)) {
		dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
		kmem_cache_free(iommu->pt_pool, page_table);
		return ERR_PTR(-ENOMEM);
	}

	/* We rely on the physical address and DMA address being the same */
	WARN_ON(pt_dma != virt_to_phys(page_table));

	return page_table;
}

static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
					 u32 *page_table)
{
	phys_addr_t pt_phys = virt_to_phys(page_table);

	dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
	kmem_cache_free(iommu->pt_pool, page_table);
}

static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
				      dma_addr_t iova, gfp_t gfp)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 *page_table;
	u32 *dte_addr;
	u32 old_dte;
	u32 dte;

	dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	dte = *dte_addr;
	if (sun50i_dte_is_pt_valid(dte)) {
		phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);

		return (u32 *)phys_to_virt(pt_phys);
	}

	page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
	if (IS_ERR(page_table))
		return page_table;

	dte = sun50i_mk_dte(virt_to_phys(page_table));
	old_dte = cmpxchg(dte_addr, 0, dte);
	if (old_dte) {
		phys_addr_t installed_pt_phys =
			sun50i_dte_get_pt_address(old_dte);
		u32 *installed_pt = phys_to_virt(installed_pt_phys);
		u32 *drop_pt = page_table;

		page_table = installed_pt;
		dte = old_dte;
		sun50i_iommu_free_page_table(iommu, drop_pt);
	}

	sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
	sun50i_table_flush(sun50i_domain, dte_addr, 1);

	return page_table;
}

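/*
 * Note on the cmpxchg above (added for clarity): two mappers can race
 * to populate the same DTE without holding a lock. Whichever cmpxchg
 * loses simply frees its freshly allocated table and adopts the one
 * that won the race, so a DTE is only ever written once.
 */
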
static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, size_t count,
			    int prot, gfp_t gfp, size_t *mapped)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 pte_index;
	u32 *page_table, *pte_addr;
	int ret = 0;

	/* the IOMMU can only handle 32-bit addresses, both input and output */
	if ((uint64_t)paddr >> 32) {
		ret = -EINVAL;
		dev_warn_once(iommu->dev,
			      "attempt to map address beyond 4GB\n");
		goto out;
	}

	page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
	if (IS_ERR(page_table)) {
		ret = PTR_ERR(page_table);
		goto out;
	}

	pte_index = sun50i_iova_get_pte_index(iova);
	pte_addr = &page_table[pte_index];
	if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
		phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);

		dev_err(iommu->dev,
			"iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
			&iova, &page_phys, &paddr, prot);
		ret = -EBUSY;
		goto out;
	}

	*pte_addr = sun50i_mk_pte(paddr, prot);
	sun50i_table_flush(sun50i_domain, pte_addr, 1);
	*mapped = size;

out:
	return ret;
}

static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t size, size_t count,
				 struct iommu_iotlb_gather *gather)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *pte_addr;
	u32 dte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);

	if (!sun50i_pte_is_page_valid(*pte_addr))
		return 0;

	memset(pte_addr, 0, sizeof(*pte_addr));
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

	return SZ_4K;
}

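/*
 * Note (added for clarity): unmap only clears the PTE and syncs the
 * table. The stale TLB and PTW cache entries are dealt with later,
 * when the core calls back into .iotlb_sync, which in this driver
 * flushes everything.
 */
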
static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *page_table;
	u32 dte, pte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[sun50i_iova_get_pte_index(iova)];
	if (!sun50i_pte_is_page_valid(pte))
		return 0;

	return sun50i_pte_get_page_address(pte) +
	       sun50i_iova_get_page_offset(iova);
}

static struct iommu_domain *
sun50i_iommu_domain_alloc_paging(struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain;

	sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
	if (!sun50i_domain)
		return NULL;

	sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
					      get_order(DT_SIZE));
	if (!sun50i_domain->dt)
		goto err_free_domain;

	refcount_set(&sun50i_domain->refcnt, 1);

	sun50i_domain->domain.geometry.aperture_start = 0;
	sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	sun50i_domain->domain.geometry.force_aperture = true;

	return &sun50i_domain->domain;

err_free_domain:
	kfree(sun50i_domain);

	return NULL;
}

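/*
 * Note (added for clarity): GFP_DMA32 matters here. The DT is handed
 * to the hardware through the 32-bit IOMMU_TTB_REG, and DTEs only
 * hold 32-bit PT addresses, so all page table memory has to live
 * below 4GB; the PT kmem cache is created with SLAB_CACHE_DMA32 for
 * the same reason.
 */
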
static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

	iommu_free_pages(sun50i_domain->dt, get_order(DT_SIZE));
	sun50i_domain->dt = NULL;

	kfree(sun50i_domain);
}

static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
				      struct sun50i_iommu_domain *sun50i_domain)
{
	iommu->domain = &sun50i_domain->domain;
	sun50i_domain->iommu = iommu;

	sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
					       DT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
		dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
		return -ENOMEM;
	}

	return sun50i_iommu_enable(iommu);
}

static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
				       struct sun50i_iommu_domain *sun50i_domain)
{
	unsigned int i;

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		phys_addr_t pt_phys;
		u32 *page_table;
		u32 *dte_addr;
		u32 dte;

		dte_addr = &sun50i_domain->dt[i];
		dte = *dte_addr;
		if (!sun50i_dte_is_pt_valid(dte))
			continue;

		memset(dte_addr, 0, sizeof(*dte_addr));
		sun50i_table_flush(sun50i_domain, dte_addr, 1);

		pt_phys = sun50i_dte_get_pt_address(dte);
		page_table = phys_to_virt(pt_phys);
		sun50i_iommu_free_page_table(iommu, page_table);
	}

	sun50i_iommu_disable(iommu);

	dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
			 DT_SIZE, DMA_TO_DEVICE);

	iommu->domain = NULL;
}

static int sun50i_iommu_identity_attach(struct iommu_domain *identity_domain,
					struct device *dev)
{
	struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
	struct sun50i_iommu_domain *sun50i_domain;

	dev_dbg(dev, "Detaching from IOMMU domain\n");

	if (iommu->domain == identity_domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);
	if (refcount_dec_and_test(&sun50i_domain->refcnt))
		sun50i_iommu_detach_domain(iommu, sun50i_domain);

	return 0;
}

static struct iommu_domain_ops sun50i_iommu_identity_ops = {
	.attach_dev	= sun50i_iommu_identity_attach,
};

static struct iommu_domain sun50i_iommu_identity_domain = {
	.type		= IOMMU_DOMAIN_IDENTITY,
	.ops		= &sun50i_iommu_identity_ops,
};

static int sun50i_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return -ENODEV;

	dev_dbg(dev, "Attaching to IOMMU domain\n");

	refcount_inc(&sun50i_domain->refcnt);

	if (iommu->domain == domain)
		return 0;

	sun50i_iommu_identity_attach(&sun50i_iommu_identity_domain, dev);

	sun50i_iommu_attach_domain(iommu, sun50i_domain);

	return 0;
}

static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
{
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static int sun50i_iommu_of_xlate(struct device *dev,
				 const struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	unsigned id = args->args[0];

	dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));

	return iommu_fwspec_add_ids(dev, &id, 1);
}

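/*
 * Note (added for clarity): the single cell of the "iommus" phandle
 * parsed above is the master ID, which matches the per-master bit
 * indexes used by the prefetch, interrupt and IOMMU_DM_AUT_CTRL
 * registers.
 */
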
static const struct iommu_ops sun50i_iommu_ops = {
	.identity_domain = &sun50i_iommu_identity_domain,
	.pgsize_bitmap	= SZ_4K,
	.device_group	= generic_single_device_group,
	.domain_alloc_paging = sun50i_iommu_domain_alloc_paging,
	.of_xlate	= sun50i_iommu_of_xlate,
	.probe_device	= sun50i_iommu_probe_device,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= sun50i_iommu_attach_device,
		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
		.iotlb_sync_map	= sun50i_iommu_iotlb_sync_map,
		.iotlb_sync	= sun50i_iommu_iotlb_sync,
		.iova_to_phys	= sun50i_iommu_iova_to_phys,
		.map_pages	= sun50i_iommu_map,
		.unmap_pages	= sun50i_iommu_unmap,
		.free		= sun50i_iommu_domain_free,
	}
};

static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
				      unsigned master, phys_addr_t iova,
				      unsigned prot)
{
	dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
		&iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");

	if (iommu->domain)
		report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
	else
		dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");

	sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
}

static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
					      unsigned addr_reg,
					      unsigned blame_reg)
{
	phys_addr_t iova;
	unsigned master;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	iova = iommu_read(iommu, addr_reg);
	blame = iommu_read(iommu, blame_reg);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);

	return iova;
}

static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
{
	enum sun50i_iommu_aci aci;
	phys_addr_t iova;
	unsigned master;
	unsigned dir;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	blame = iommu_read(iommu, IOMMU_INT_STA_REG);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);
	iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
	aci = sun50i_get_pte_aci(iommu_read(iommu,
					    IOMMU_INT_ERR_DATA_REG(master)));

	switch (aci) {
		/*
		 * If we are in the read-only domain, then it means we
		 * tried to write.
		 */
	case SUN50I_IOMMU_ACI_RD:
		dir = IOMMU_FAULT_WRITE;
		break;

		/*
		 * If we are in the write-only domain, then it means
		 * we tried to read.
		 */
	case SUN50I_IOMMU_ACI_WR:

		/*
		 * If we are in the domain without any permission, we
		 * can't really tell. Let's default to a read
		 * operation.
		 */
	case SUN50I_IOMMU_ACI_NONE:

	case SUN50I_IOMMU_ACI_RD_WR:
	default:
		dir = IOMMU_FAULT_READ;
		break;
	}

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, dir);

	return iova;
}

static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
	u32 status, l1_status, l2_status, resets;
	struct sun50i_iommu *iommu = dev_id;

	spin_lock(&iommu->iommu_lock);

	status = iommu_read(iommu, IOMMU_INT_STA_REG);
	if (!(status & IOMMU_INT_MASK)) {
		spin_unlock(&iommu->iommu_lock);
		return IRQ_NONE;
	}

	l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG);
	l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG);

	if (status & IOMMU_INT_INVALID_L2PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L2_REG,
					   IOMMU_L2PG_INT_REG);
	else if (status & IOMMU_INT_INVALID_L1PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L1_REG,
					   IOMMU_L1PG_INT_REG);
	else
		sun50i_iommu_handle_perm_irq(iommu);

	iommu_write(iommu, IOMMU_INT_CLR_REG, status);

	resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK;
	iommu_write(iommu, IOMMU_RESET_REG, ~resets);
	iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL);

	spin_unlock(&iommu->iommu_lock);

	return IRQ_HANDLED;
}

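/*
 * Note (interpretation, not from the original source): the two
 * IOMMU_RESET_REG writes at the end of the handler appear to pulse a
 * reset on exactly the masters that faulted, since writing ~resets
 * clears only their bits in the register, before the follow-up
 * IOMMU_RESET_RELEASE_ALL write releases all of them again so the
 * faulting transactions can be retried.
 */
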
static int sun50i_iommu_probe(struct platform_device *pdev)
{
	struct sun50i_iommu *iommu;
	int ret, irq;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;
	spin_lock_init(&iommu->iommu_lock);
	iommu->domain = &sun50i_iommu_identity_domain;
	platform_set_drvdata(pdev, iommu);
	iommu->dev = &pdev->dev;

	iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
					   PT_SIZE, PT_SIZE,
					   SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA32,
					   NULL);
	if (!iommu->pt_pool)
		return -ENOMEM;

	iommu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iommu->base)) {
		ret = PTR_ERR(iommu->base);
		goto err_free_cache;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_cache;
	}

	iommu->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->clk)) {
		dev_err(&pdev->dev, "Couldn't get our clock.\n");
		ret = PTR_ERR(iommu->clk);
		goto err_free_cache;
	}

	iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->reset)) {
		dev_err(&pdev->dev, "Couldn't get our reset line.\n");
		ret = PTR_ERR(iommu->reset);
		goto err_free_cache;
	}

	ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
				     NULL, dev_name(&pdev->dev));
	if (ret)
		goto err_free_cache;

	ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
	if (ret)
		goto err_remove_sysfs;

	ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
			       dev_name(&pdev->dev), iommu);
	if (ret < 0)
		goto err_unregister;

	return 0;

err_unregister:
	iommu_device_unregister(&iommu->iommu);

err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);

err_free_cache:
	kmem_cache_destroy(iommu->pt_pool);

	return ret;
}

static const struct of_device_id sun50i_iommu_dt[] = {
	{ .compatible = "allwinner,sun50i-h6-iommu", },
	{ .compatible = "allwinner,sun50i-h616-iommu", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);

static struct platform_driver sun50i_iommu_driver = {
	.driver		= {
		.name			= "sun50i-iommu",
		.of_match_table		= sun50i_iommu_dt,
		.suppress_bind_attrs	= true,
	}
};
builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);

MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
MODULE_LICENSE("Dual BSD/GPL");