// SPDX-License-Identifier: GPL-2.0-only
/*
 * Unisoc IOMMU driver
 *
 * Copyright (C) 2020 Unisoc, Inc.
 * Author: Chunyan Zhang <chunyan.zhang@unisoc.com>
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define SPRD_IOMMU_PAGE_SHIFT	12
#define SPRD_IOMMU_PAGE_SIZE	SZ_4K

/* Registers of the EX-flavour IOMMU IP */
#define SPRD_EX_CFG		0x0
#define SPRD_IOMMU_VAOR_BYPASS	BIT(4)
#define SPRD_IOMMU_GATE_EN	BIT(1)
#define SPRD_IOMMU_EN		BIT(0)
#define SPRD_EX_UPDATE		0x4
#define SPRD_EX_FIRST_VPN	0x8
#define SPRD_EX_VPN_RANGE	0xc
#define SPRD_EX_FIRST_PPN	0x10
#define SPRD_EX_DEFAULT_PPN	0x14

/* Registers of the VAU-flavour IOMMU IP */
#define SPRD_IOMMU_VERSION	0x0
#define SPRD_VERSION_MASK	GENMASK(15, 8)
#define SPRD_VERSION_SHIFT	0x8
#define SPRD_VAU_CFG		0x4
#define SPRD_VAU_UPDATE		0x8
#define SPRD_VAU_AUTH_CFG	0xc
#define SPRD_VAU_FIRST_PPN	0x10
#define SPRD_VAU_DEFAULT_PPN_RD	0x14
#define SPRD_VAU_DEFAULT_PPN_WR	0x18
#define SPRD_VAU_FIRST_VPN	0x1c
#define SPRD_VAU_VPN_RANGE	0x20
/*
 * Supported Unisoc IOMMU IP flavours; values match the hardware
 * version field read from SPRD_IOMMU_VERSION (see sprd_iommu_get_version).
 */
enum sprd_iommu_version {
	SPRD_IOMMU_EX,
	SPRD_IOMMU_VAU,
};
52 * struct sprd_iommu_device - high-level sprd IOMMU device representation,
53 * including hardware information and configuration, also driver data, etc
55 * @ver: sprd IOMMU IP version
56 * @prot_page_va: protect page base virtual address
57 * @prot_page_pa: protect page base physical address, data would be
58 * written to here while translation fault
59 * @base: mapped base address for accessing registers
60 * @dev: pointer to basic device structure
61 * @iommu: IOMMU core representation
63 * @eb: gate clock which controls IOMMU access
65 struct sprd_iommu_device
{
66 struct sprd_iommu_domain
*dom
;
67 enum sprd_iommu_version ver
;
69 dma_addr_t prot_page_pa
;
72 struct iommu_device iommu
;
76 struct sprd_iommu_domain
{
77 spinlock_t pgtlock
; /* lock for page table */
78 struct iommu_domain domain
;
79 u32
*pgt_va
; /* page table virtual address base */
80 dma_addr_t pgt_pa
; /* page table physical address base */
81 struct sprd_iommu_device
*sdev
;
84 static const struct iommu_ops sprd_iommu_ops
;
86 static struct sprd_iommu_domain
*to_sprd_domain(struct iommu_domain
*dom
)
88 return container_of(dom
, struct sprd_iommu_domain
, domain
);
92 sprd_iommu_write(struct sprd_iommu_device
*sdev
, unsigned int reg
, u32 val
)
94 writel_relaxed(val
, sdev
->base
+ reg
);
98 sprd_iommu_read(struct sprd_iommu_device
*sdev
, unsigned int reg
)
100 return readl_relaxed(sdev
->base
+ reg
);
104 sprd_iommu_update_bits(struct sprd_iommu_device
*sdev
, unsigned int reg
,
105 u32 mask
, u32 shift
, u32 val
)
107 u32 t
= sprd_iommu_read(sdev
, reg
);
109 t
= (t
& (~(mask
<< shift
))) | ((val
& mask
) << shift
);
110 sprd_iommu_write(sdev
, reg
, t
);
114 sprd_iommu_get_version(struct sprd_iommu_device
*sdev
)
116 int ver
= (sprd_iommu_read(sdev
, SPRD_IOMMU_VERSION
) &
117 SPRD_VERSION_MASK
) >> SPRD_VERSION_SHIFT
;
129 sprd_iommu_pgt_size(struct iommu_domain
*domain
)
131 return ((domain
->geometry
.aperture_end
-
132 domain
->geometry
.aperture_start
+ 1) >>
133 SPRD_IOMMU_PAGE_SHIFT
) * sizeof(u32
);
136 static struct iommu_domain
*sprd_iommu_domain_alloc_paging(struct device
*dev
)
138 struct sprd_iommu_domain
*dom
;
140 dom
= kzalloc(sizeof(*dom
), GFP_KERNEL
);
144 spin_lock_init(&dom
->pgtlock
);
146 dom
->domain
.geometry
.aperture_start
= 0;
147 dom
->domain
.geometry
.aperture_end
= SZ_256M
- 1;
148 dom
->domain
.geometry
.force_aperture
= true;
153 static void sprd_iommu_first_vpn(struct sprd_iommu_domain
*dom
)
155 struct sprd_iommu_device
*sdev
= dom
->sdev
;
159 if (sdev
->ver
== SPRD_IOMMU_EX
)
160 reg
= SPRD_EX_FIRST_VPN
;
162 reg
= SPRD_VAU_FIRST_VPN
;
164 val
= dom
->domain
.geometry
.aperture_start
>> SPRD_IOMMU_PAGE_SHIFT
;
165 sprd_iommu_write(sdev
, reg
, val
);
168 static void sprd_iommu_vpn_range(struct sprd_iommu_domain
*dom
)
170 struct sprd_iommu_device
*sdev
= dom
->sdev
;
174 if (sdev
->ver
== SPRD_IOMMU_EX
)
175 reg
= SPRD_EX_VPN_RANGE
;
177 reg
= SPRD_VAU_VPN_RANGE
;
179 val
= (dom
->domain
.geometry
.aperture_end
-
180 dom
->domain
.geometry
.aperture_start
) >> SPRD_IOMMU_PAGE_SHIFT
;
181 sprd_iommu_write(sdev
, reg
, val
);
184 static void sprd_iommu_first_ppn(struct sprd_iommu_domain
*dom
)
186 u32 val
= dom
->pgt_pa
>> SPRD_IOMMU_PAGE_SHIFT
;
187 struct sprd_iommu_device
*sdev
= dom
->sdev
;
190 if (sdev
->ver
== SPRD_IOMMU_EX
)
191 reg
= SPRD_EX_FIRST_PPN
;
193 reg
= SPRD_VAU_FIRST_PPN
;
195 sprd_iommu_write(sdev
, reg
, val
);
198 static void sprd_iommu_default_ppn(struct sprd_iommu_device
*sdev
)
200 u32 val
= sdev
->prot_page_pa
>> SPRD_IOMMU_PAGE_SHIFT
;
202 if (sdev
->ver
== SPRD_IOMMU_EX
) {
203 sprd_iommu_write(sdev
, SPRD_EX_DEFAULT_PPN
, val
);
204 } else if (sdev
->ver
== SPRD_IOMMU_VAU
) {
205 sprd_iommu_write(sdev
, SPRD_VAU_DEFAULT_PPN_RD
, val
);
206 sprd_iommu_write(sdev
, SPRD_VAU_DEFAULT_PPN_WR
, val
);
210 static void sprd_iommu_hw_en(struct sprd_iommu_device
*sdev
, bool en
)
212 unsigned int reg_cfg
;
215 if (sdev
->ver
== SPRD_IOMMU_EX
)
216 reg_cfg
= SPRD_EX_CFG
;
218 reg_cfg
= SPRD_VAU_CFG
;
220 mask
= SPRD_IOMMU_EN
| SPRD_IOMMU_GATE_EN
;
222 sprd_iommu_update_bits(sdev
, reg_cfg
, mask
, 0, val
);
225 static void sprd_iommu_cleanup(struct sprd_iommu_domain
*dom
)
229 /* Nothing need to do if the domain hasn't been attached */
233 pgt_size
= sprd_iommu_pgt_size(&dom
->domain
);
234 dma_free_coherent(dom
->sdev
->dev
, pgt_size
, dom
->pgt_va
, dom
->pgt_pa
);
235 sprd_iommu_hw_en(dom
->sdev
, false);
/*
 * Free a paging domain.  Runs cleanup first, then releases the kzalloc'd
 * wrapper — without the kfree the domain struct would leak.
 */
static void sprd_iommu_domain_free(struct iommu_domain *domain)
{
	struct sprd_iommu_domain *dom = to_sprd_domain(domain);

	sprd_iommu_cleanup(dom);
	kfree(dom);
}
247 static int sprd_iommu_attach_device(struct iommu_domain
*domain
,
250 struct sprd_iommu_device
*sdev
= dev_iommu_priv_get(dev
);
251 struct sprd_iommu_domain
*dom
= to_sprd_domain(domain
);
252 size_t pgt_size
= sprd_iommu_pgt_size(domain
);
254 /* The device is attached to this domain */
255 if (sdev
->dom
== dom
)
258 /* The first time that domain is attaching to a device */
260 dom
->pgt_va
= dma_alloc_coherent(sdev
->dev
, pgt_size
, &dom
->pgt_pa
, GFP_KERNEL
);
270 * One sprd IOMMU serves one client device only, disabled it before
271 * configure mapping table to avoid access conflict in case other
272 * mapping table is stored in.
274 sprd_iommu_hw_en(sdev
, false);
275 sprd_iommu_first_ppn(dom
);
276 sprd_iommu_first_vpn(dom
);
277 sprd_iommu_vpn_range(dom
);
278 sprd_iommu_default_ppn(sdev
);
279 sprd_iommu_hw_en(sdev
, true);
284 static int sprd_iommu_map(struct iommu_domain
*domain
, unsigned long iova
,
285 phys_addr_t paddr
, size_t pgsize
, size_t pgcount
,
286 int prot
, gfp_t gfp
, size_t *mapped
)
288 struct sprd_iommu_domain
*dom
= to_sprd_domain(domain
);
289 size_t size
= pgcount
* SPRD_IOMMU_PAGE_SIZE
;
293 u32 pabase
= (u32
)paddr
;
294 unsigned long start
= domain
->geometry
.aperture_start
;
295 unsigned long end
= domain
->geometry
.aperture_end
;
298 pr_err("No sprd_iommu_device attached to the domain\n");
302 if (iova
< start
|| (iova
+ size
) > (end
+ 1)) {
303 dev_err(dom
->sdev
->dev
, "(iova(0x%lx) + size(%zx)) are not in the range!\n",
308 pgt_base_iova
= dom
->pgt_va
+ ((iova
- start
) >> SPRD_IOMMU_PAGE_SHIFT
);
310 spin_lock_irqsave(&dom
->pgtlock
, flags
);
311 for (i
= 0; i
< pgcount
; i
++) {
312 pgt_base_iova
[i
] = pabase
>> SPRD_IOMMU_PAGE_SHIFT
;
313 pabase
+= SPRD_IOMMU_PAGE_SIZE
;
315 spin_unlock_irqrestore(&dom
->pgtlock
, flags
);
321 static size_t sprd_iommu_unmap(struct iommu_domain
*domain
, unsigned long iova
,
322 size_t pgsize
, size_t pgcount
,
323 struct iommu_iotlb_gather
*iotlb_gather
)
325 struct sprd_iommu_domain
*dom
= to_sprd_domain(domain
);
328 size_t size
= pgcount
* SPRD_IOMMU_PAGE_SIZE
;
329 unsigned long start
= domain
->geometry
.aperture_start
;
330 unsigned long end
= domain
->geometry
.aperture_end
;
332 if (iova
< start
|| (iova
+ size
) > (end
+ 1))
335 pgt_base_iova
= dom
->pgt_va
+ ((iova
- start
) >> SPRD_IOMMU_PAGE_SHIFT
);
337 spin_lock_irqsave(&dom
->pgtlock
, flags
);
338 memset(pgt_base_iova
, 0, pgcount
* sizeof(u32
));
339 spin_unlock_irqrestore(&dom
->pgtlock
, flags
);
344 static int sprd_iommu_sync_map(struct iommu_domain
*domain
,
345 unsigned long iova
, size_t size
)
347 struct sprd_iommu_domain
*dom
= to_sprd_domain(domain
);
350 if (dom
->sdev
->ver
== SPRD_IOMMU_EX
)
351 reg
= SPRD_EX_UPDATE
;
353 reg
= SPRD_VAU_UPDATE
;
355 /* clear IOMMU TLB buffer after page table updated */
356 sprd_iommu_write(dom
->sdev
, reg
, 0xffffffff);
/* iotlb_sync hook: reuse the full-TLB flush; the range is irrelevant. */
static void sprd_iommu_sync(struct iommu_domain *domain,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	sprd_iommu_sync_map(domain, 0, 0);
}
366 static phys_addr_t
sprd_iommu_iova_to_phys(struct iommu_domain
*domain
,
369 struct sprd_iommu_domain
*dom
= to_sprd_domain(domain
);
372 unsigned long start
= domain
->geometry
.aperture_start
;
373 unsigned long end
= domain
->geometry
.aperture_end
;
375 if (WARN_ON(iova
< start
|| iova
> end
))
378 spin_lock_irqsave(&dom
->pgtlock
, flags
);
379 pa
= *(dom
->pgt_va
+ ((iova
- start
) >> SPRD_IOMMU_PAGE_SHIFT
));
380 pa
= (pa
<< SPRD_IOMMU_PAGE_SHIFT
) + ((iova
- start
) & (SPRD_IOMMU_PAGE_SIZE
- 1));
381 spin_unlock_irqrestore(&dom
->pgtlock
, flags
);
386 static struct iommu_device
*sprd_iommu_probe_device(struct device
*dev
)
388 struct sprd_iommu_device
*sdev
= dev_iommu_priv_get(dev
);
393 static int sprd_iommu_of_xlate(struct device
*dev
,
394 const struct of_phandle_args
*args
)
396 struct platform_device
*pdev
;
398 if (!dev_iommu_priv_get(dev
)) {
399 pdev
= of_find_device_by_node(args
->np
);
400 dev_iommu_priv_set(dev
, platform_get_drvdata(pdev
));
401 platform_device_put(pdev
);
408 static const struct iommu_ops sprd_iommu_ops
= {
409 .domain_alloc_paging
= sprd_iommu_domain_alloc_paging
,
410 .probe_device
= sprd_iommu_probe_device
,
411 .device_group
= generic_single_device_group
,
412 .of_xlate
= sprd_iommu_of_xlate
,
413 .pgsize_bitmap
= SPRD_IOMMU_PAGE_SIZE
,
414 .owner
= THIS_MODULE
,
415 .default_domain_ops
= &(const struct iommu_domain_ops
) {
416 .attach_dev
= sprd_iommu_attach_device
,
417 .map_pages
= sprd_iommu_map
,
418 .unmap_pages
= sprd_iommu_unmap
,
419 .iotlb_sync_map
= sprd_iommu_sync_map
,
420 .iotlb_sync
= sprd_iommu_sync
,
421 .iova_to_phys
= sprd_iommu_iova_to_phys
,
422 .free
= sprd_iommu_domain_free
,
426 static const struct of_device_id sprd_iommu_of_match
[] = {
427 { .compatible
= "sprd,iommu-v1" },
430 MODULE_DEVICE_TABLE(of
, sprd_iommu_of_match
);
433 * Clock is not required, access to some of IOMMUs is controlled by gate
434 * clk, enabled clocks for that kind of IOMMUs before accessing.
435 * Return 0 for success or no clocks found.
437 static int sprd_iommu_clk_enable(struct sprd_iommu_device
*sdev
)
441 eb
= devm_clk_get_optional(sdev
->dev
, NULL
);
449 return clk_prepare_enable(eb
);
452 static void sprd_iommu_clk_disable(struct sprd_iommu_device
*sdev
)
455 clk_disable_unprepare(sdev
->eb
);
458 static int sprd_iommu_probe(struct platform_device
*pdev
)
460 struct sprd_iommu_device
*sdev
;
461 struct device
*dev
= &pdev
->dev
;
465 sdev
= devm_kzalloc(dev
, sizeof(*sdev
), GFP_KERNEL
);
469 base
= devm_platform_ioremap_resource(pdev
, 0);
471 dev_err(dev
, "Failed to get ioremap resource.\n");
472 return PTR_ERR(base
);
476 sdev
->prot_page_va
= dma_alloc_coherent(dev
, SPRD_IOMMU_PAGE_SIZE
,
477 &sdev
->prot_page_pa
, GFP_KERNEL
);
478 if (!sdev
->prot_page_va
)
481 platform_set_drvdata(pdev
, sdev
);
484 ret
= iommu_device_sysfs_add(&sdev
->iommu
, dev
, NULL
, dev_name(dev
));
488 ret
= iommu_device_register(&sdev
->iommu
, &sprd_iommu_ops
, dev
);
492 ret
= sprd_iommu_clk_enable(sdev
);
494 goto unregister_iommu
;
496 ret
= sprd_iommu_get_version(sdev
);
498 dev_err(dev
, "IOMMU version(%d) is invalid.\n", ret
);
506 sprd_iommu_clk_disable(sdev
);
508 iommu_device_unregister(&sdev
->iommu
);
510 iommu_device_sysfs_remove(&sdev
->iommu
);
512 dma_free_coherent(sdev
->dev
, SPRD_IOMMU_PAGE_SIZE
, sdev
->prot_page_va
, sdev
->prot_page_pa
);
516 static void sprd_iommu_remove(struct platform_device
*pdev
)
518 struct sprd_iommu_device
*sdev
= platform_get_drvdata(pdev
);
520 dma_free_coherent(sdev
->dev
, SPRD_IOMMU_PAGE_SIZE
, sdev
->prot_page_va
, sdev
->prot_page_pa
);
522 platform_set_drvdata(pdev
, NULL
);
523 iommu_device_sysfs_remove(&sdev
->iommu
);
524 iommu_device_unregister(&sdev
->iommu
);
527 static struct platform_driver sprd_iommu_driver
= {
529 .name
= "sprd-iommu",
530 .of_match_table
= sprd_iommu_of_match
,
531 .suppress_bind_attrs
= true,
533 .probe
= sprd_iommu_probe
,
534 .remove
= sprd_iommu_remove
,
536 module_platform_driver(sprd_iommu_driver
);
538 MODULE_DESCRIPTION("IOMMU driver for Unisoc SoCs");
539 MODULE_ALIAS("platform:sprd-iommu");
540 MODULE_LICENSE("GPL");