// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "iommu-pages.h"
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

static struct iommu_domain exynos_identity_domain;
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
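/*
 * Size sanity note (illustration only): SECT_SIZE is 1MiB (1 << 20),
 * LPAGE_SIZE is 64KiB (1 << 16) and SPAGE_SIZE is 4KiB (1 << 12), so e.g.
 * 0x1234567 & SECT_MASK == 0x1200000 and 0x1234567 & SPAGE_MASK == 0x1234000.
 */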
#define lv1ent_fault(sent)	((*(sent) == ZERO_LV2LINK) || \
				 ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent)	(*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent)	((*(sent) & 3) == 1)
#define lv1ent_page(sent)	((*(sent) != ZERO_LV2LINK) && \
				 ((*(sent) & 3) == 1))
#define lv1ent_section(sent)	((*(sent) & 3) == 2)

#define lv2ent_fault(pent)	((*(pent) & 3) == 0)
#define lv2ent_small(pent)	((*(pent) & 2) == 2)
#define lv2ent_large(pent)	((*(pent) & 3) == 1)
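/*
 * The low two bits of each entry encode its type, matching the macros above:
 * a first-level entry is a fault (0b00 or 0b11), a link to a second-level
 * table (0b01) or a 1MiB section (0b10); a second-level entry is a fault
 * (0b00), a 64KiB large page (0b01) or a 4KiB small page (0b1x).
 */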
/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
 * v5.0 introduced support for 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support the address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
 * value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
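/*
 * Example (illustration only): on a v5+ SysMMU a 36-bit physical address such
 * as 0x8_8040_0000 is stored in a 32-bit page table entry as
 * 0x880400000 >> SYSMMU_V5_PG_ENT_SHIFT == 0x88040000; older controllers
 * store the 32-bit physical address unshifted.
 */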
static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
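/*
 * Worked example (illustration only): for iova == 0x12345678,
 * lv1ent_offset() == 0x123 (bits 31..20), lv2ent_offset() == 0x45
 * (bits 19..12) and the in-page offset is 0x678 (bits 11..0).
 */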
#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
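/*
 * Example (illustration only, v1..v3 layout where PG_ENT_SHIFT == 0):
 * mapping a 1MiB section at physical 0x20000000 with IOMMU_READ | IOMMU_WRITE
 * yields mk_lv1ent_sect(0x20000000, prot) == 0x20000000 | LV1_PROT[3] | 2,
 * i.e. the section base, the access-protection bits and type 0b10.
 */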
#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_EAP		(1 << 2)
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define CTRL_VM_ENABLE			BIT(0)
#define CTRL_VM_FAULT_MODE_STALL	BIT(3)
#define CAPA0_CAPA1_EXIST		BIT(11)
#define CAPA1_VCR_ENABLED		BIT(14)
/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
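/*
 * Example (illustration only): a controller reporting version 3.3 yields
 * MMU_RAW_VER(reg) == MAKE_MMU_VER(3, 3) == (3 << 7) | 3 == 0x183, from which
 * MMU_MAJ_VER() == 3 and MMU_MIN_VER() == 3.
 */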
/* v1.x - v3.x registers */
#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

/* v7.x registers */
#define REG_V7_CAPA0		0x870
#define REG_V7_CAPA1		0x874
#define REG_V7_CTRL_VM		0x8000

#define has_sysmmu(dev)		(dev_iommu_priv_get(dev) != NULL)
static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(lv2table_base(sent)) +
		lv2ent_offset(iova);
}
struct sysmmu_fault {
	sysmmu_iova_t addr;	/* IOVA address that caused fault */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault() */
};

struct sysmmu_v1_fault_info {
	unsigned short addr_reg; /* register to read IOVA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault() */
};
static const struct sysmmu_v1_fault_info sysmmu_v1_faults[] = {
	{ REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
	{ REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};
/* SysMMU v5 has the same faults for AR (0..4 bits) and AW (16..20 bits) */
static const char * const sysmmu_v5_fault_names[] = {
	"PTW",
	"PAGE",
	"MULTI-HIT",
	"ACCESS PROTECTION",
	"SECURITY PROTECTION"
};

static const char * const sysmmu_v7_fault_names[] = {
	"PTW",
	"PAGE",
	"ACCESS PROTECTION",
	"RESERVED"
};
/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add, contains a list of SYSMMU controllers defined by device tree,
 * which are bound to given master device. It is usually referenced by 'owner'
 * pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};
/*
 * This structure is an Exynos specific generalization of struct iommu_domain.
 * It contains a list of SYSMMU controllers from all master devices, which have
 * been attached to this domain, and page tables of the IO address space
 * defined by it. It is usually referenced by 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};
struct sysmmu_drvdata;

/*
 * SysMMU version specific data. Contains offsets for the registers which can
 * be found in different SysMMU variants, but have different offset values.
 * Also contains version specific callbacks to abstract the hardware.
 */
struct sysmmu_variant {
	u32 pt_base;		/* page table base address (physical) */
	u32 flush_all;		/* invalidate all TLB entries */
	u32 flush_entry;	/* invalidate specific TLB entry */
	u32 flush_range;	/* invalidate TLB entries in specified range */
	u32 flush_start;	/* start address of range invalidation */
	u32 flush_end;		/* end address of range invalidation */
	u32 int_status;		/* interrupt status information */
	u32 int_clear;		/* clear the interrupt */
	u32 fault_va;		/* IOVA address that caused fault */
	u32 fault_info;		/* fault transaction info */

	int (*get_fault_info)(struct sysmmu_drvdata *data, unsigned int itype,
			      struct sysmmu_fault *fault);
};
/*
 * This structure holds all data of a single SYSMMU controller, this includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from device
 * tree. It is usually referenced by 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
	const struct sysmmu_variant *variant; /* version specific data */

	/* v7 fields */
	bool has_vcr;			/* virtual machine control register */
};

#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
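/*
 * SYSMMU_REG() hides the layout differences between SysMMU generations:
 * e.g. SYSMMU_REG(data, flush_all) expands to
 * data->sfrbase + data->variant->flush_all, so the same writel() works for
 * every variant once data->variant has been selected at probe time.
 */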
static int exynos_sysmmu_v1_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	const struct sysmmu_v1_fault_info *finfo;

	if (itype >= ARRAY_SIZE(sysmmu_v1_faults))
		return -ENXIO;

	finfo = &sysmmu_v1_faults[itype];
	fault->addr = readl(data->sfrbase + finfo->addr_reg);
	fault->name = finfo->name;
	fault->type = finfo->type;

	return 0;
}
static int exynos_sysmmu_v5_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	unsigned int addr_reg;

	if (itype < ARRAY_SIZE(sysmmu_v5_fault_names)) {
		fault->type = IOMMU_FAULT_READ;
		addr_reg = REG_V5_FAULT_AR_VA;
	} else if (itype >= 16 && itype <= 20) {
		fault->type = IOMMU_FAULT_WRITE;
		addr_reg = REG_V5_FAULT_AW_VA;
		itype -= 16;
	} else {
		return -ENXIO;
	}

	fault->name = sysmmu_v5_fault_names[itype];
	fault->addr = readl(data->sfrbase + addr_reg);

	return 0;
}
static int exynos_sysmmu_v7_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	u32 info = readl(SYSMMU_REG(data, fault_info));

	fault->addr = readl(SYSMMU_REG(data, fault_va));
	fault->name = sysmmu_v7_fault_names[itype % 4];
	fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	return 0;
}
static const struct sysmmu_variant sysmmu_v1_variant = {
	.get_fault_info	= exynos_sysmmu_v1_get_fault_info,
};

static const struct sysmmu_variant sysmmu_v5_variant = {
	.get_fault_info	= exynos_sysmmu_v5_get_fault_info,
};

/* SysMMU v7: non-VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_variant = {
	.get_fault_info	= exynos_sysmmu_v7_get_fault_info,
};

/* SysMMU v7: VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_vm_variant = {
	.flush_entry	= 0x8014,
	.flush_range	= 0x8018,
	.flush_start	= 0x8020,

	.fault_info	= 0x1004,

	.get_fault_info	= exynos_sysmmu_v7_get_fault_info,
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}
static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	writel(0x1, SYSMMU_REG(data, flush_all));
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       SYSMMU_REG(data, flush_entry));
			iova += SPAGE_SIZE;
		}
	} else {
		writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
		writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
		       SYSMMU_REG(data, flush_end));
		writel(0x1, SYSMMU_REG(data, flush_range));
	}
}
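/*
 * Illustration only: invalidating 4 pages starting at 0x10000000 on a v5+
 * controller writes 0x10000000 to flush_start, 0x10003000 to flush_end and
 * then 0x1 to flush_range; older controllers (or a single page) loop over
 * flush_entry with bit 0 set in each written address.
 */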
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	u32 pt_base;

	if (MMU_MAJ_VER(data->version) < 5)
		pt_base = pgd;
	else
		pt_base = pgd >> SPAGE_ORDER;

	writel(pt_base, SYSMMU_REG(data, pt_base));
	__sysmmu_tlb_invalidate(data);
}
static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}
static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
{
	u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);

	return capa0 & CAPA0_CAPA1_EXIST;
}

static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
{
	u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);

	data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
}
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	if (MMU_MAJ_VER(data->version) < 5) {
		data->variant = &sysmmu_v1_variant;
	} else if (MMU_MAJ_VER(data->version) < 7) {
		data->variant = &sysmmu_v5_variant;
	} else {
		if (__sysmmu_has_capa1(data))
			__sysmmu_get_vcr(data);
		if (data->has_vcr)
			data->variant = &sysmmu_v7_vm_variant;
		else
			data->variant = &sysmmu_v7_variant;
	}

	__sysmmu_disable_clocks(data);
}
static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault *fault)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n",
		dev_name(data->master),
		fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE",
		fault->name, fault->addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault->addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault->addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	struct sysmmu_drvdata *data = dev_id;
	unsigned int itype;
	struct sysmmu_fault fault;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	spin_lock(&data->lock);
	clk_enable(data->clk_master);

	itype = __ffs(readl(SYSMMU_REG(data, int_status)));
	ret = data->variant->get_fault_info(data, itype, &fault);
	if (ret) {
		dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype);
		goto out;
	}
	show_fault_information(data, &fault);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain, data->master,
					 fault.addr, fault.type);
	if (ret)
		panic("Unrecoverable System MMU Fault!");

out:
	writel(1 << itype, SYSMMU_REG(data, int_clear));

	/* SysMMU is in blocked state when interrupt occurred */
	sysmmu_unblock(data);
	clk_disable(data->clk_master);
	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}
static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
{
	u32 ctrl;

	if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
		return;

	ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
	ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
	writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
}
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	__sysmmu_enable_vid(data);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * SYSMMU driver keeps master's clock enabled only for the short
	 * time, while accessing the registers. For performing address
	 * translation during DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
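/*
 * Example (illustration only): unmapping a 64KiB large page on a v2 controller
 * gives num_inv == min(0x10000 / SPAGE_SIZE, 64) == 16, matching the comment
 * above about its 8-way, 64-set TLB.
 */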
static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get_optional(dev, "sysmmu");
	if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get_optional(dev, "aclk");
	if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get_optional(dev, "pclk");
	if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get_optional(dev, "master");
	if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	__sysmmu_get_version(data);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	if (MMU_MAJ_VER(data->version) >= 5) {
		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
		if (ret) {
			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
			goto err_dma_set_mask;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		goto err_dma_set_mask;

	return 0;

err_dma_set_mask:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (&data->domain->domain != &exynos_identity_domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}

	return 0;
}
static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (&data->domain->domain != &exynos_identity_domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}

	return 0;
}
static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name			= "exynos-sysmmu",
		.of_match_table		= sysmmu_of_match,
		.pm			= &sysmmu_pm_ops,
		.suppress_bind_attrs	= true,
	}
};
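/*
 * A master device picks up this driver through the generic "iommus" device
 * tree property, e.g. (illustration only, node names and addresses made up):
 *
 *	sysmmu_fimd: sysmmu@11e20000 {
 *		compatible = "samsung,exynos-sysmmu";
 *	};
 *
 *	fimd@11c00000 {
 *		iommus = <&sysmmu_fimd>;
 *	};
 *
 * Each phandle listed in "iommus" ends up in exynos_iommu_of_xlate() below.
 */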
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}
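/*
 * The page tables live in normal cacheable memory while the SysMMU walks them
 * over the bus, so every PTE update is bracketed by cache maintenance:
 * sync-for-cpu before the write and sync-for-device after it. The dma == phys
 * assumption checked in exynos_iommu_domain_alloc_paging() is what makes
 * virt_to_phys() usable as the DMA handle here.
 */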
static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	iommu_free_pages(domain->lv2entcnt, 1);
err_counter:
	iommu_free_pages(domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}
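/*
 * Size note (illustration only): the first-level table has NUM_LV1ENTRIES
 * (4096) four-byte entries, i.e. LV1TABLE_SIZE == 16KiB, which is why it is
 * allocated as 4 pages (order 2); the lv2entcnt array needs 4096 * 2 bytes
 * and fits in 2 pages (order 1).
 */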
static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	iommu_free_pages(domain->pgtable, 2);
	iommu_free_pages(domain->lv2entcnt, 1);
	kfree(domain);
}
static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
					struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct exynos_iommu_domain *domain;
	phys_addr_t pagetable;
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (owner->domain == identity_domain)
		return 0;

	domain = to_exynos_domain(owner->domain);
	pagetable = virt_to_phys(domain->pgtable);

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = identity_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Restored IOMMU to IDENTITY from pgtable %pa\n",
		__func__, &pagetable);

	return 0;
}

static struct iommu_domain_ops exynos_identity_ops = {
	.attach_dev = exynos_iommu_identity_attach,
};

static struct iommu_domain exynos_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &exynos_identity_ops,
};
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int err;

	err = exynos_iommu_identity_attach(&exynos_identity_domain, dev);
	if (err)
		return err;

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
				    sysmmu_pte_t *sent, sysmmu_iova_t iova,
				    short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}
static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * the following restrictions:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
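/*
 * Illustration only (not part of the driver): an IOVA allocator honouring the
 * System MMU v3.3 restriction above could round every region start up to a
 * 128KiB boundary and leave at least a 128KiB hole, e.g.:
 *
 *	start = ALIGN(prev_end + SZ_128K, SZ_128K);
 *
 * where prev_end is the end of the previously allocated region (a made-up
 * name for this sketch).
 */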
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    size_t count, int prot, gfp_t gfp, size_t *mapped)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);
	else
		*mapped = size;

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}
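/*
 * Illustration only: a 1MiB-aligned, 1MiB-sized map request takes the
 * lv1set_section() path above and consumes a single first-level entry, while
 * a 4KiB or 64KiB request first materialises a second-level table via
 * alloc_lv2entry() and then fills one (or sixteen) entries in lv2set_page().
 */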
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size, size_t count,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;

done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}
static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return ERR_PTR(-ENODEV);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}

	/* There is always at least one entry, see exynos_iommu_of_xlate() */
	data = list_first_entry(&owner->controllers,
				struct sysmmu_drvdata, owner_node);

	return &data->iommu;
}
static void exynos_iommu_release_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	WARN_ON(exynos_iommu_identity_attach(&exynos_identity_domain, dev));

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}
static int exynos_iommu_of_xlate(struct device *dev,
				 const struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		owner->domain = &exynos_identity_domain;
		dev_iommu_priv_set(dev, owner);
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);

	return 0;
}
static const struct iommu_ops exynos_iommu_ops = {
	.identity_domain = &exynos_identity_domain,
	.domain_alloc_paging = exynos_iommu_domain_alloc_paging,
	.device_group = generic_device_group,
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = exynos_iommu_attach_device,
		.map_pages = exynos_iommu_map,
		.unmap_pages = exynos_iommu_unmap,
		.iova_to_phys = exynos_iommu_iova_to_phys,
		.free = exynos_iommu_domain_free,
	}
};
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	return 0;
err_reg_driver:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);