// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
// Copyright (C) 2019-2020, Cerno

#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#define IOMMU_RESET_REG			0x010
#define IOMMU_ENABLE_REG		0x020
#define IOMMU_ENABLE_ENABLE			BIT(0)

#define IOMMU_BYPASS_REG		0x030
#define IOMMU_AUTO_GATING_REG		0x040
#define IOMMU_AUTO_GATING_ENABLE		BIT(0)

#define IOMMU_WBUF_CTRL_REG		0x044
#define IOMMU_OOO_CTRL_REG		0x048
#define IOMMU_4KB_BDY_PRT_CTRL_REG	0x04c
#define IOMMU_TTB_REG			0x050
#define IOMMU_TLB_ENABLE_REG		0x060
#define IOMMU_TLB_PREFETCH_REG		0x070
#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m)	BIT(m)

#define IOMMU_TLB_FLUSH_REG		0x080
#define IOMMU_TLB_FLUSH_PTW_CACHE		BIT(17)
#define IOMMU_TLB_FLUSH_MACRO_TLB		BIT(16)
#define IOMMU_TLB_FLUSH_MICRO_TLB(i)		(BIT(i) & GENMASK(5, 0))

#define IOMMU_TLB_IVLD_ADDR_REG		0x090
#define IOMMU_TLB_IVLD_ADDR_MASK_REG	0x094
#define IOMMU_TLB_IVLD_ENABLE_REG	0x098
#define IOMMU_TLB_IVLD_ENABLE_ENABLE		BIT(0)

#define IOMMU_PC_IVLD_ADDR_REG		0x0a0
#define IOMMU_PC_IVLD_ENABLE_REG	0x0a8
#define IOMMU_PC_IVLD_ENABLE_ENABLE		BIT(0)

#define IOMMU_DM_AUT_CTRL_REG(d)	(0x0b0 + ((d) / 2) * 4)
#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m)	(1 << ((((d) & 1) * 16) + ((m) * 2)))
#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m)	(1 << ((((d) & 1) * 16) + ((m) * 2) + 1))

#define IOMMU_DM_AUT_OVWT_REG		0x0d0
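
/*
 * Each IOMMU_DM_AUT_CTRL register packs the permissions of two
 * domains: the even-numbered domain in the low halfword, the odd one
 * in the high halfword, with one read/write "unavailable" bit pair
 * per master. For example, domain 3 / master 2 lives in the register
 * at 0x0b0 + (3 / 2) * 4 = 0x0b4, with its read bit at
 * 1 << ((3 & 1) * 16 + 2 * 2) = BIT(20) and its write bit at BIT(21).
 */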
#define IOMMU_INT_ENABLE_REG		0x100
#define IOMMU_INT_CLR_REG		0x104
#define IOMMU_INT_STA_REG		0x108
#define IOMMU_INT_ERR_ADDR_REG(i)	(0x110 + (i) * 4)
#define IOMMU_INT_ERR_ADDR_L1_REG	0x130
#define IOMMU_INT_ERR_ADDR_L2_REG	0x134
#define IOMMU_INT_ERR_DATA_REG(i)	(0x150 + (i) * 4)
#define IOMMU_L1PG_INT_REG		0x0180
#define IOMMU_L2PG_INT_REG		0x0184

#define IOMMU_INT_INVALID_L2PG			BIT(17)
#define IOMMU_INT_INVALID_L1PG			BIT(16)
#define IOMMU_INT_MASTER_PERMISSION(m)		BIT(m)
#define IOMMU_INT_MASTER_MASK			(IOMMU_INT_MASTER_PERMISSION(0) | \
						 IOMMU_INT_MASTER_PERMISSION(1) | \
						 IOMMU_INT_MASTER_PERMISSION(2) | \
						 IOMMU_INT_MASTER_PERMISSION(3) | \
						 IOMMU_INT_MASTER_PERMISSION(4) | \
						 IOMMU_INT_MASTER_PERMISSION(5))
#define IOMMU_INT_MASK				(IOMMU_INT_INVALID_L1PG | \
						 IOMMU_INT_INVALID_L2PG | \
						 IOMMU_INT_MASTER_MASK)

#define PT_ENTRY_SIZE			sizeof(u32)

#define NUM_DT_ENTRIES			4096
#define DT_SIZE				(NUM_DT_ENTRIES * PT_ENTRY_SIZE)

#define NUM_PT_ENTRIES			256
#define PT_SIZE				(NUM_PT_ENTRIES * PT_ENTRY_SIZE)
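
/*
 * For scale: the directory table is 4096 * 4 = 16 kB and each page
 * table 256 * 4 = 1 kB. A single DTE covers 256 * 4 kB = 1 MB of
 * IOVA space, so the 4096 DTEs together span the full 32-bit (4 GB)
 * address space.
 */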
struct sun50i_iommu {
	struct iommu_device iommu;

	/* Lock to modify the IOMMU registers */
	spinlock_t iommu_lock;

	struct device *dev;
	void __iomem *base;
	struct reset_control *reset;
	struct clk *clk;

	struct iommu_domain *domain;
	struct iommu_group *group;
	struct kmem_cache *pt_pool;
};
struct sun50i_iommu_domain {
	struct iommu_domain domain;

	/* Number of devices attached to the domain */
	refcount_t refcnt;

	/* L1 Page Table */
	u32 *dt;
	dma_addr_t dt_dma;

	struct sun50i_iommu *iommu;
};
static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct sun50i_iommu_domain, domain);
}

static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
	return readl(iommu->base + offset);
}

static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
	writel(value, iommu->base + offset);
}
/*
 * The Allwinner H6 IOMMU uses a 2-level page table.
 *
 * The first level is the usual Directory Table (DT), that consists of
 * 4096 4-bytes Directory Table Entries (DTE), each pointing to a Page
 * Table (PT).
 *
 * Each PT consists of 256 4-bytes Page Table Entries (PTE), each
 * pointing to a 4kB page of physical memory.
 *
 * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
 * register that contains its physical address.
 */

#define SUN50I_IOVA_DTE_MASK	GENMASK(31, 20)
#define SUN50I_IOVA_PTE_MASK	GENMASK(19, 12)
#define SUN50I_IOVA_PAGE_MASK	GENMASK(11, 0)
static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
}

static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
}

static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
}
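
/*
 * As a worked example, the IOVA 0x12345678 decomposes into DTE index
 * 0x123 (bits 31:20), PTE index 0x45 (bits 19:12) and page offset
 * 0x678 (bits 11:0).
 */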
/*
 * Each Directory Table Entry has a Page Table address and a valid
 * bit:
 *
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:10 - Page Table address
 *   9:2  - Reserved
 *   1:0  - 1 if the entry is valid
 */

#define SUN50I_DTE_PT_ADDRESS_MASK	GENMASK(31, 10)
#define SUN50I_DTE_PT_ATTRS		GENMASK(1, 0)
#define SUN50I_DTE_PT_VALID		1
static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
{
	return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
}

static bool sun50i_dte_is_pt_valid(u32 dte)
{
	return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
}

static u32 sun50i_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
}
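
/*
 * For example, a 1kB-aligned page table at DMA address 0x40001400
 * yields the DTE (0x40001400 & GENMASK(31, 10)) | 1 = 0x40001401.
 */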
/*
 * Each PTE has a Page address, an authority index and a valid bit:
 *
 * +----------------+-----+-----+-----+---+-----+
 * | Page address   | Rsv | ACI | Rsv | V | Rsv |
 * +----------------+-----+-----+-----+---+-----+
 *  31:12 - Page address
 *  11:8  - Reserved
 *   7:4  - Authority Control Index
 *   3:2  - Reserved
 *     1  - 1 if the entry is valid
 *     0  - Reserved
 *
 * The way permissions work is that the IOMMU has 16 "domains" that
 * can be configured to give each master either read or write
 * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0
 * seems to be the default domain, and its permissions in the
 * IOMMU_DM_AUT_CTRL_REG are read-only, so it's not really useful to
 * enforce any particular permission.
 *
 * Each page table entry then carries a reference to the domain it is
 * assigned to, so that we can actually enforce permissions on a
 * per-page basis.
 *
 * In order to make this work with the IOMMU framework, we use 4
 * different domains, starting at 1: RD_WR, RD, WR and NONE,
 * depending on the permission we want to enforce. Each domain has
 * every master set up in the same way, since the IOMMU framework
 * doesn't seem to restrict page access on a per-device basis. We
 * then use the relevant domain index when generating the page table
 * entry, depending on the permissions we want enforced.
 */
enum sun50i_iommu_aci {
	SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
	SUN50I_IOMMU_ACI_NONE,
	SUN50I_IOMMU_ACI_RD,
	SUN50I_IOMMU_ACI_WR,
	SUN50I_IOMMU_ACI_RD_WR,
};

#define SUN50I_PTE_PAGE_ADDRESS_MASK	GENMASK(31, 12)
#define SUN50I_PTE_ACI_MASK		GENMASK(7, 4)
#define SUN50I_PTE_PAGE_VALID		BIT(1)
static phys_addr_t sun50i_pte_get_page_address(u32 pte)
{
	return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
}

static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
{
	return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
}

static bool sun50i_pte_is_page_valid(u32 pte)
{
	return pte & SUN50I_PTE_PAGE_VALID;
}
static u32 sun50i_mk_pte(phys_addr_t page, int prot)
{
	enum sun50i_iommu_aci aci;
	u32 flags = 0;

	if (prot & (IOMMU_READ | IOMMU_WRITE))
		aci = SUN50I_IOMMU_ACI_RD_WR;
	else if (prot & IOMMU_READ)
		aci = SUN50I_IOMMU_ACI_RD;
	else if (prot & IOMMU_WRITE)
		aci = SUN50I_IOMMU_ACI_WR;
	else
		aci = SUN50I_IOMMU_ACI_NONE;

	flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
	page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
	return page | flags | SUN50I_PTE_PAGE_VALID;
}
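
/*
 * For example, mapping page 0x40001000 with prot == IOMMU_READ
 * selects SUN50I_IOMMU_ACI_RD (2), so flags becomes
 * FIELD_PREP(GENMASK(7, 4), 2) = 0x20 and the resulting PTE is
 * 0x40001000 | 0x20 | BIT(1) = 0x40001022.
 */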
static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
			       void *vaddr, unsigned int count)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	dma_addr_t dma = virt_to_phys(vaddr);
	size_t size = count * PT_ENTRY_SIZE;

	dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}
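
/*
 * The flush below uses readl_poll_timeout_atomic() since every caller
 * holds iommu_lock with interrupts disabled, so we cannot sleep while
 * waiting for the hardware to ack the flush.
 */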
static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
	u32 reg;
	int ret;

	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu,
		    IOMMU_TLB_FLUSH_REG,
		    IOMMU_TLB_FLUSH_PTW_CACHE |
		    IOMMU_TLB_FLUSH_MACRO_TLB |
		    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(0));

	/* Wait for the hardware to clear the flush request bits */
	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
					reg, !reg,
					1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB Flush timed out!\n");

	return ret;
}
static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	/*
	 * At boot, we'll have a first call into .flush_iotlb_all right after
	 * .probe_device, and since we link our (single) domain to our iommu in
	 * the .attach_device callback, we don't have that pointer set.
	 *
	 * It shouldn't really be any trouble to ignore it though since we flush
	 * all caches as part of the device powerup.
	 */
	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_flush_all_tlb(iommu);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}

static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *gather)
{
	sun50i_iommu_flush_iotlb_all(domain);
}
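
/*
 * Bringing the hardware up follows a fixed sequence: deassert the
 * reset line, enable the clock, program the DT address into
 * IOMMU_TTB_REG, enable prefetching and interrupts, program the
 * per-ACI master permissions, flush all the caches, and only then
 * turn on automatic gating and the IOMMU itself.
 */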
static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
	struct sun50i_iommu_domain *sun50i_domain;
	unsigned long flags;
	int ret;

	if (!iommu->domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);

	ret = reset_control_deassert(iommu->reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(iommu->clk);
	if (ret)
		goto err_reset_assert;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
	iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

	ret = sun50i_iommu_flush_all_tlb(iommu);
	if (ret) {
		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
		goto err_clk_disable;
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
	iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;

err_clk_disable:
	clk_disable_unprepare(iommu->clk);

err_reset_assert:
	reset_control_assert(iommu->reset);

	return ret;
}
static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_ENABLE_REG, 0);
	iommu_write(iommu, IOMMU_TTB_REG, 0);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	clk_disable_unprepare(iommu->clk);
	reset_control_assert(iommu->reset);
}
static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
					   gfp_t gfp)
{
	dma_addr_t pt_dma;
	u32 *page_table;

	page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, pt_dma)) {
		dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
		kmem_cache_free(iommu->pt_pool, page_table);
		return ERR_PTR(-ENOMEM);
	}

	/* We rely on the physical address and DMA address being the same */
	WARN_ON(pt_dma != virt_to_phys(page_table));

	return page_table;
}
static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
					 u32 *page_table)
{
	phys_addr_t pt_phys = virt_to_phys(page_table);

	dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
	kmem_cache_free(iommu->pt_pool, page_table);
}
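
/*
 * L2 page tables are allocated lazily, on the first mapping that
 * touches a given 1 MB IOVA region. The DTE is installed with
 * cmpxchg() so that two mappers racing on the same region are safe:
 * the loser sees a non-zero old DTE, adopts the winner's page table
 * and frees the one it just allocated.
 */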
static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
				      dma_addr_t iova, gfp_t gfp)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 *page_table;
	u32 *dte_addr;
	u32 old_dte;
	u32 dte;

	dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	dte = *dte_addr;
	if (sun50i_dte_is_pt_valid(dte)) {
		phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
		return (u32 *)phys_to_virt(pt_phys);
	}

	page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
	if (IS_ERR(page_table))
		return page_table;

	dte = sun50i_mk_dte(virt_to_phys(page_table));
	old_dte = cmpxchg(dte_addr, 0, dte);
	if (old_dte) {
		phys_addr_t installed_pt_phys =
			sun50i_dte_get_pt_address(old_dte);
		u32 *installed_pt = phys_to_virt(installed_pt_phys);
		u32 *drop_pt = page_table;

		page_table = installed_pt;
		dte = old_dte;

		sun50i_iommu_free_page_table(iommu, drop_pt);
	}

	sun50i_table_flush(sun50i_domain, page_table, PT_SIZE);
	sun50i_table_flush(sun50i_domain, dte_addr, 1);

	return page_table;
}
static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 pte_index;
	u32 *page_table, *pte_addr;
	int ret = 0;

	page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
	if (IS_ERR(page_table)) {
		ret = PTR_ERR(page_table);
		goto out;
	}

	pte_index = sun50i_iova_get_pte_index(iova);
	pte_addr = &page_table[pte_index];
	if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
		phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);
		dev_err(iommu->dev,
			"iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
			&iova, &page_phys, &paddr, prot);
		ret = -EBUSY;
		goto out;
	}

	*pte_addr = sun50i_mk_pte(paddr, prot);
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

out:
	return ret;
}
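
/*
 * Since sun50i_iommu_ops advertises pgsize_bitmap = SZ_4K, the IOMMU
 * core splits every request into 4kB pages before calling into
 * sun50i_iommu_map() and sun50i_iommu_unmap(), which is why unmap can
 * unconditionally return SZ_4K on success.
 */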
static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t size, struct iommu_iotlb_gather *gather)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *pte_addr;
	u32 dte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);

	if (!sun50i_pte_is_page_valid(*pte_addr))
		return 0;

	memset(pte_addr, 0, sizeof(*pte_addr));
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

	return SZ_4K;
}
static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *page_table;
	u32 dte, pte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[sun50i_iova_get_pte_index(iova)];
	if (!sun50i_pte_is_page_valid(pte))
		return 0;

	return sun50i_pte_get_page_address(pte) +
	       sun50i_iova_get_page_offset(iova);
}
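
/*
 * The L1 directory table allocated below is DT_SIZE = 16 kB, i.e.
 * get_order(DT_SIZE) = 2, hence four contiguous zeroed pages from
 * __get_free_pages().
 */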
static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
{
	struct sun50i_iommu_domain *sun50i_domain;

	if (type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY &&
	    type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
	if (!sun50i_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&sun50i_domain->domain))
		goto err_free_domain;

	sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(DT_SIZE));
	if (!sun50i_domain->dt)
		goto err_put_cookie;

	refcount_set(&sun50i_domain->refcnt, 1);

	sun50i_domain->domain.geometry.aperture_start = 0;
	sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	sun50i_domain->domain.geometry.force_aperture = true;

	return &sun50i_domain->domain;

err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&sun50i_domain->domain);

err_free_domain:
	kfree(sun50i_domain);

	return NULL;
}
static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

	free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
	sun50i_domain->dt = NULL;

	iommu_put_dma_cookie(domain);

	kfree(sun50i_domain);
}
static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
				      struct sun50i_iommu_domain *sun50i_domain)
{
	iommu->domain = &sun50i_domain->domain;
	sun50i_domain->iommu = iommu;

	sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
					       DT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
		dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
		return -ENOMEM;
	}

	return sun50i_iommu_enable(iommu);
}
static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
				       struct sun50i_iommu_domain *sun50i_domain)
{
	unsigned int i;

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		phys_addr_t pt_phys;
		u32 *page_table;
		u32 *dte_addr;
		u32 dte;

		dte_addr = &sun50i_domain->dt[i];
		dte = *dte_addr;
		if (!sun50i_dte_is_pt_valid(dte))
			continue;

		memset(dte_addr, 0, sizeof(*dte_addr));
		sun50i_table_flush(sun50i_domain, dte_addr, 1);

		pt_phys = sun50i_dte_get_pt_address(dte);
		page_table = phys_to_virt(pt_phys);
		sun50i_iommu_free_page_table(iommu, page_table);
	}

	sun50i_iommu_disable(iommu);

	dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
			 DT_SIZE, DMA_TO_DEVICE);

	iommu->domain = NULL;
}
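
/*
 * The hardware has a single translation table pointer, so only one
 * domain can be live at a time. The refcount tracks how many devices
 * are attached to the domain: the first attach programs it into the
 * hardware, and the last detach tears it down again.
 */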
static void sun50i_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);

	dev_dbg(dev, "Detaching from IOMMU domain\n");

	if (iommu->domain != domain)
		return;

	if (refcount_dec_and_test(&sun50i_domain->refcnt))
		sun50i_iommu_detach_domain(iommu, sun50i_domain);
}
static int sun50i_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return -ENODEV;

	dev_dbg(dev, "Attaching to IOMMU domain\n");

	refcount_inc(&sun50i_domain->refcnt);

	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		sun50i_iommu_detach_device(iommu->domain, dev);

	sun50i_iommu_attach_domain(iommu, sun50i_domain);

	return 0;
}
static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
{
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static void sun50i_iommu_release_device(struct device *dev) {}

static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
	struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}
static int sun50i_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	unsigned id = args->args[0];

	dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));

	return iommu_fwspec_add_ids(dev, &id, 1);
}
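
/*
 * of_xlate() consumes the single cell following the IOMMU phandle as
 * the master ID, so a consumer node would look roughly like this
 * (unit address and master index illustrative only):
 *
 *	iommu: iommu@30f0000 {
 *		compatible = "allwinner,sun50i-h6-iommu";
 *		#iommu-cells = <1>;
 *	};
 *
 *	mixer0 {
 *		iommus = <&iommu 0>;
 *	};
 */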
static const struct iommu_ops sun50i_iommu_ops = {
	.pgsize_bitmap	= SZ_4K,
	.attach_dev	= sun50i_iommu_attach_device,
	.detach_dev	= sun50i_iommu_detach_device,
	.device_group	= sun50i_iommu_device_group,
	.domain_alloc	= sun50i_iommu_domain_alloc,
	.domain_free	= sun50i_iommu_domain_free,
	.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
	.iotlb_sync	= sun50i_iommu_iotlb_sync,
	.iova_to_phys	= sun50i_iommu_iova_to_phys,
	.map		= sun50i_iommu_map,
	.of_xlate	= sun50i_iommu_of_xlate,
	.probe_device	= sun50i_iommu_probe_device,
	.release_device	= sun50i_iommu_release_device,
	.unmap		= sun50i_iommu_unmap,
};
static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
				      unsigned master, phys_addr_t iova,
				      unsigned prot)
{
	dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
		&iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");

	if (iommu->domain)
		report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
	else
		dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
}
static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
					      unsigned addr_reg,
					      unsigned blame_reg)
{
	phys_addr_t iova;
	unsigned master;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	iova = iommu_read(iommu, addr_reg);
	blame = iommu_read(iommu, blame_reg);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);

	return iova;
}
static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
{
	enum sun50i_iommu_aci aci;
	phys_addr_t iova;
	unsigned master;
	unsigned dir;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	blame = iommu_read(iommu, IOMMU_INT_STA_REG);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);
	iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
	aci = sun50i_get_pte_aci(iommu_read(iommu,
					    IOMMU_INT_ERR_DATA_REG(master)));

	switch (aci) {
		/*
		 * If we are in the read-only domain, then it means we
		 * tried to write.
		 */
	case SUN50I_IOMMU_ACI_RD:
		dir = IOMMU_FAULT_WRITE;
		break;

		/*
		 * If we are in the write-only domain, then it means
		 * we tried to read.
		 */
	case SUN50I_IOMMU_ACI_WR:

		/*
		 * If we are in the domain without any permission, we
		 * can't really tell. Let's default to a read
		 * operation.
		 */
	case SUN50I_IOMMU_ACI_NONE:

		/* A fault in the read-write domain is unexpected. */
	case SUN50I_IOMMU_ACI_RD_WR:
	default:
		dir = IOMMU_FAULT_READ;
		break;
	}

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, dir);

	return iova;
}
static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
	struct sun50i_iommu *iommu = dev_id;
	u32 status;

	spin_lock(&iommu->iommu_lock);

	status = iommu_read(iommu, IOMMU_INT_STA_REG);
	if (!(status & IOMMU_INT_MASK)) {
		spin_unlock(&iommu->iommu_lock);
		return IRQ_NONE;
	}

	if (status & IOMMU_INT_INVALID_L2PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L2_REG,
					   IOMMU_L2PG_INT_REG);
	else if (status & IOMMU_INT_INVALID_L1PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L1_REG,
					   IOMMU_L1PG_INT_REG);
	else
		sun50i_iommu_handle_perm_irq(iommu);

	iommu_write(iommu, IOMMU_INT_CLR_REG, status);

	iommu_write(iommu, IOMMU_RESET_REG, ~status);
	iommu_write(iommu, IOMMU_RESET_REG, status);

	spin_unlock(&iommu->iommu_lock);

	return IRQ_HANDLED;
}
static int sun50i_iommu_probe(struct platform_device *pdev)
{
	struct sun50i_iommu *iommu;
	int ret, irq;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;
	spin_lock_init(&iommu->iommu_lock);
	platform_set_drvdata(pdev, iommu);
	iommu->dev = &pdev->dev;

	iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
					   PT_SIZE, PT_SIZE,
					   SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!iommu->pt_pool)
		return -ENOMEM;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		ret = PTR_ERR(iommu->group);
		goto err_free_cache;
	}

	iommu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iommu->base)) {
		ret = PTR_ERR(iommu->base);
		goto err_free_group;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_group;
	}

	iommu->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->clk)) {
		dev_err(&pdev->dev, "Couldn't get our clock.\n");
		ret = PTR_ERR(iommu->clk);
		goto err_free_group;
	}

	iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->reset)) {
		dev_err(&pdev->dev, "Couldn't get our reset line.\n");
		ret = PTR_ERR(iommu->reset);
		goto err_free_group;
	}

	ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
				     NULL, dev_name(&pdev->dev));
	if (ret)
		goto err_free_group;

	iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&iommu->iommu);
	if (ret)
		goto err_remove_sysfs;

	ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
			       dev_name(&pdev->dev), iommu);
	if (ret < 0)
		goto err_unregister;

	bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);

	return 0;

err_unregister:
	iommu_device_unregister(&iommu->iommu);

err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);

err_free_group:
	iommu_group_put(iommu->group);

err_free_cache:
	kmem_cache_destroy(iommu->pt_pool);

	return ret;
}
static const struct of_device_id sun50i_iommu_dt[] = {
	{ .compatible = "allwinner,sun50i-h6-iommu", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);

static struct platform_driver sun50i_iommu_driver = {
	.driver	= {
		.name			= "sun50i-iommu",
		.of_match_table		= sun50i_iommu_dt,
		.suppress_bind_attrs	= true,
	}
};
builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);

MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
MODULE_LICENSE("Dual BSD/GPL");