// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
 * v5.0 introduced support for 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to the proper
 * value (either 0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
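/*
 * Illustrative example (not part of the original source): with the v5
 * layout (PG_ENT_SHIFT == 4), a 36-bit section physical address such as
 * 0x123400000 is stored in a page table entry as (0x123400000 >> 4) | flags,
 * and sect_to_phys() below recovers it by shifting the entry value left by
 * PG_ENT_SHIFT again. On v1.x-v3.x the shift is 0 and entries hold the
 * physical address bits directly.
 */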
static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (0 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (0 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};
#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
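/*
 * Illustrative example (not part of the original source): the LV1_PROT /
 * LV2_PROT tables are indexed by the masked IOMMU prot bits, where
 * IOMMU_READ == 1 and IOMMU_WRITE == 2. A request for
 * IOMMU_READ | IOMMU_WRITE therefore selects index 3, e.g.
 * SYSMMU_V5_LV2_PROT[3] == (3 << 2) for a v5 small page.
 */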
#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
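/*
 * Illustrative example (not part of the original source): for
 * iova == 0x12345678, lv1ent_offset() yields 0x123 (bits [31:20]) and
 * lv2ent_offset() yields 0x45 (bits [19:12]); the remaining 0x678 is the
 * offset within the 4KiB page.
 */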
#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_EAP		(1 << 2)
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */
/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
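/*
 * Illustrative example (not part of the original source):
 * MAKE_MMU_VER(3, 3) == (3 << 7) | 3 == 0x183. Hardware reports this value
 * in bits [31:21] of REG_MMU_VERSION, so MMU_RAW_VER() extracts 0x183 and
 * MMU_MAJ_VER()/MMU_MIN_VER() decode it back to major 3, minor 3.
 */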
/* v1.x - v3.x registers */
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN	0x00C
#define REG_V5_MMU_FLUSH_ALL	0x010
#define REG_V5_MMU_FLUSH_ENTRY	0x014
#define REG_V5_MMU_FLUSH_RANGE	0x018
#define REG_V5_MMU_FLUSH_START	0x020
#define REG_V5_MMU_FLUSH_END	0x024
#define REG_V5_INT_STATUS	0x060
#define REG_V5_INT_CLEAR	0x064
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080
#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
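/*
 * Illustrative note (not part of the original source): every unmapped lv1
 * slot is pointed at the shared zero_lv2_table instead of being left as a
 * plain fault entry. For example, if zero_lv2_table sits at physical
 * address 0x40000000, ZERO_LV2LINK is (0x40000000 >> PG_ENT_SHIFT) | 1,
 * a valid page-table link whose second-level entries all fault. This keeps
 * the v3.3 FLPD cache from caching first-level fault entries (see the
 * workaround notes further below).
 */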
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(lv2table_base(sent)) +
				lv2ent_offset(iova);
}
/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};
static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};
/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add. It contains a list of SYSMMU controllers defined by the
 * device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};
/*
 * This structure is an exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices that
 * have been attached to this domain, and the page tables of the IO address
 * space it defines. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients;	/* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;		/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};
/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_MMU_FLUSH_ENTRY);
			iova += SPAGE_SIZE;
		}
	} else {
		if (num_inv == 1) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		} else {
			writel((iova & SPAGE_MASK),
			       data->sfrbase + REG_V5_MMU_FLUSH_START);
			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
			       data->sfrbase + REG_V5_MMU_FLUSH_END);
			writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
		}
	}
}
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
		       data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}
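/*
 * Illustrative example (not part of the original source): for a page table
 * at physical address 0x67890000 and 4KiB pages, v1.x-v3.x hardware takes
 * the full address via REG_PT_BASE_ADDR, while v5.x takes the page frame
 * number, 0x67890000 >> PAGE_SHIFT == 0x67890, via REG_V5_PT_BASE_PFN.
 */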
static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}
static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr = -1;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);
	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
			data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * SYSMMU driver keeps master's clock enabled only for the short
	 * time, while accessing the registers. For performing address
	 * translation during DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
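/*
 * Illustrative example (not part of the original source): unmapping one
 * 64KiB large page on a v2 SYSMMU gives size / PAGE_SIZE == 16 TLB entry
 * invalidations, one per 4KiB slot the entry may occupy in the
 * set-associative TLB; a 1MiB section would compute 256 but is capped at
 * 64, the number of TLB sets.
 */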
static const struct iommu_ops exynos_iommu_ops;

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	return 0;
}
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}
static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};
static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}
static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}
static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}
static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry has been updated to a valid entry after it was
 * cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager must apply the workaround
 * described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, the start address of any I/O virtual region must be aligned to
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs the following restrictions:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
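/*
 * Illustrative layout (not part of the original source) satisfying the
 * v3.3 rules above: a region covering [0x10000000, 0x10100000) may be
 * followed by a second region starting at 0x10120000, leaving a 128KiB
 * hole, with the start address 0x10120000 itself 128KiB-aligned.
 */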
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv1ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}
static int exynos_iommu_add_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}
	iommu_group_put(group);

	return 0;
}
static void exynos_iommu_remove_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}
	iommu_group_remove_device(dev);

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}
static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev->archdata.iommu = owner;
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}
static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = generic_device_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);