/*
 * omap iommu: omap2/3 architecture specific functions
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
14 #include <linux/err.h>
15 #include <linux/device.h>
16 #include <linux/jiffies.h>
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/stringify.h>
21 #include <plat/iommu.h>
/*
 * omap2 architecture specific register bit definitions
 */
#define IOMMU_ARCH_VERSION	0x00000011

/* MMU_SYSCONFIG: idle-mode field and reset/autoidle control bits */
#define MMU_SYS_IDLE_SHIFT	3
#define MMU_SYS_IDLE_FORCE	(0 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_NONE	(1 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_SMART	(2 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_MASK	(3 << MMU_SYS_IDLE_SHIFT)

#define MMU_SYS_SOFTRESET	(1 << 1)
#define MMU_SYS_AUTOIDLE	1

/* MMU_SYSSTATUS */
#define MMU_SYS_RESETDONE	1

/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT	(1 << 4)
#define MMU_IRQ_TABLEWALKFAULT	(1 << 3)
#define MMU_IRQ_EMUMISS		(1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT	(1 << 1)
#define MMU_IRQ_TLBMISS		(1 << 0)

#define __MMU_IRQ_FAULT		\
	(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK		\
	(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
/* interrupt set when the hardware table walk (TWL) is in use */
#define MMU_IRQ_TWL_MASK	(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
/* interrupt set when software handles TLB misses itself */
#define MMU_IRQ_TLB_MISS_MASK	(__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)

/* MMU_CNTL */
#define MMU_CNTL_SHIFT		1
#define MMU_CNTL_MASK		(7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB	(1 << 3)
#define MMU_CNTL_TWL_EN		(1 << 2)
#define MMU_CNTL_MMU_EN		(1 << 1)

/* Map a MMU_CAM_PGSZ_* page-size field to the matching VA tag mask. */
#define get_cam_va_mask(pgsz)				\
	(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_1M)  ? 0xfff00000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_4K)  ? 0xfffff000 : 0)
69 static void __iommu_set_twl(struct iommu
*obj
, bool on
)
71 u32 l
= iommu_read_reg(obj
, MMU_CNTL
);
74 iommu_write_reg(obj
, MMU_IRQ_TWL_MASK
, MMU_IRQENABLE
);
76 iommu_write_reg(obj
, MMU_IRQ_TLB_MISS_MASK
, MMU_IRQENABLE
);
80 l
|= (MMU_CNTL_MMU_EN
| MMU_CNTL_TWL_EN
);
82 l
|= (MMU_CNTL_MMU_EN
);
84 iommu_write_reg(obj
, l
, MMU_CNTL
);
88 static int omap2_iommu_enable(struct iommu
*obj
)
91 unsigned long timeout
;
93 if (!obj
->iopgd
|| !IS_ALIGNED((u32
)obj
->iopgd
, SZ_16K
))
96 pa
= virt_to_phys(obj
->iopgd
);
97 if (!IS_ALIGNED(pa
, SZ_16K
))
100 iommu_write_reg(obj
, MMU_SYS_SOFTRESET
, MMU_SYSCONFIG
);
102 timeout
= jiffies
+ msecs_to_jiffies(20);
104 l
= iommu_read_reg(obj
, MMU_SYSSTATUS
);
105 if (l
& MMU_SYS_RESETDONE
)
107 } while (!time_after(jiffies
, timeout
));
109 if (!(l
& MMU_SYS_RESETDONE
)) {
110 dev_err(obj
->dev
, "can't take mmu out of reset\n");
114 l
= iommu_read_reg(obj
, MMU_REVISION
);
115 dev_info(obj
->dev
, "%s: version %d.%d\n", obj
->name
,
116 (l
>> 4) & 0xf, l
& 0xf);
118 l
= iommu_read_reg(obj
, MMU_SYSCONFIG
);
119 l
&= ~MMU_SYS_IDLE_MASK
;
120 l
|= (MMU_SYS_IDLE_SMART
| MMU_SYS_AUTOIDLE
);
121 iommu_write_reg(obj
, l
, MMU_SYSCONFIG
);
123 iommu_write_reg(obj
, pa
, MMU_TTB
);
125 __iommu_set_twl(obj
, true);
130 static void omap2_iommu_disable(struct iommu
*obj
)
132 u32 l
= iommu_read_reg(obj
, MMU_CNTL
);
135 iommu_write_reg(obj
, l
, MMU_CNTL
);
136 iommu_write_reg(obj
, MMU_SYS_IDLE_FORCE
, MMU_SYSCONFIG
);
138 dev_dbg(obj
->dev
, "%s is shutting down\n", obj
->name
);
141 static void omap2_iommu_set_twl(struct iommu
*obj
, bool on
)
143 __iommu_set_twl(obj
, false);
146 static u32
omap2_iommu_fault_isr(struct iommu
*obj
, u32
*ra
)
151 stat
= iommu_read_reg(obj
, MMU_IRQSTATUS
);
152 stat
&= MMU_IRQ_MASK
;
158 da
= iommu_read_reg(obj
, MMU_FAULT_AD
);
161 if (stat
& MMU_IRQ_TLBMISS
)
162 errs
|= OMAP_IOMMU_ERR_TLB_MISS
;
163 if (stat
& MMU_IRQ_TRANSLATIONFAULT
)
164 errs
|= OMAP_IOMMU_ERR_TRANS_FAULT
;
165 if (stat
& MMU_IRQ_EMUMISS
)
166 errs
|= OMAP_IOMMU_ERR_EMU_MISS
;
167 if (stat
& MMU_IRQ_TABLEWALKFAULT
)
168 errs
|= OMAP_IOMMU_ERR_TBLWALK_FAULT
;
169 if (stat
& MMU_IRQ_MULTIHITFAULT
)
170 errs
|= OMAP_IOMMU_ERR_MULTIHIT_FAULT
;
171 iommu_write_reg(obj
, stat
, MMU_IRQSTATUS
);
176 static void omap2_tlb_read_cr(struct iommu
*obj
, struct cr_regs
*cr
)
178 cr
->cam
= iommu_read_reg(obj
, MMU_READ_CAM
);
179 cr
->ram
= iommu_read_reg(obj
, MMU_READ_RAM
);
182 static void omap2_tlb_load_cr(struct iommu
*obj
, struct cr_regs
*cr
)
184 iommu_write_reg(obj
, cr
->cam
| MMU_CAM_V
, MMU_CAM
);
185 iommu_write_reg(obj
, cr
->ram
, MMU_RAM
);
188 static u32
omap2_cr_to_virt(struct cr_regs
*cr
)
190 u32 page_size
= cr
->cam
& MMU_CAM_PGSZ_MASK
;
191 u32 mask
= get_cam_va_mask(cr
->cam
& page_size
);
193 return cr
->cam
& mask
;
196 static struct cr_regs
*omap2_alloc_cr(struct iommu
*obj
, struct iotlb_entry
*e
)
200 if (e
->da
& ~(get_cam_va_mask(e
->pgsz
))) {
201 dev_err(obj
->dev
, "%s:\twrong alignment: %08x\n", __func__
,
203 return ERR_PTR(-EINVAL
);
206 cr
= kmalloc(sizeof(*cr
), GFP_KERNEL
);
208 return ERR_PTR(-ENOMEM
);
210 cr
->cam
= (e
->da
& MMU_CAM_VATAG_MASK
) | e
->prsvd
| e
->pgsz
| e
->valid
;
211 cr
->ram
= e
->pa
| e
->endian
| e
->elsz
| e
->mixed
;
216 static inline int omap2_cr_valid(struct cr_regs
*cr
)
218 return cr
->cam
& MMU_CAM_V
;
221 static u32
omap2_get_pte_attr(struct iotlb_entry
*e
)
225 attr
= e
->mixed
<< 5;
227 attr
|= e
->elsz
>> 3;
228 attr
<<= (((e
->pgsz
== MMU_CAM_PGSZ_4K
) ||
229 (e
->pgsz
== MMU_CAM_PGSZ_64K
)) ? 0 : 6);
233 static ssize_t
omap2_dump_cr(struct iommu
*obj
, struct cr_regs
*cr
, char *buf
)
237 /* FIXME: Need more detail analysis of cam/ram */
238 p
+= sprintf(p
, "%08x %08x %01x\n", cr
->cam
, cr
->ram
,
239 (cr
->cam
& MMU_CAM_P
) ? 1 : 0);
/*
 * pr_reg - helper for omap2_iommu_dump_ctx: snprintf one named MMU
 * register into 'p', advancing 'p' and consuming 'len'; bails out to
 * the local 'out' label when the remaining space can no longer hold a
 * full line. Relies on p/len/obj/out in the expansion context.
 */
#define pr_reg(name)							\
	do {								\
		ssize_t bytes;						\
		const char *str = "%20s: %08x\n";			\
		const int maxcol = 32;					\
		bytes = snprintf(p, maxcol, str, __stringify(name),	\
				 iommu_read_reg(obj, MMU_##name));	\
		p += bytes;						\
		len -= bytes;						\
		if (len < maxcol)					\
			goto out;					\
	} while (0)
257 static ssize_t
omap2_iommu_dump_ctx(struct iommu
*obj
, char *buf
, ssize_t len
)
278 pr_reg(EMU_FAULT_AD
);
283 static void omap2_iommu_save_ctx(struct iommu
*obj
)
288 for (i
= 0; i
< (MMU_REG_SIZE
/ sizeof(u32
)); i
++) {
289 p
[i
] = iommu_read_reg(obj
, i
* sizeof(u32
));
290 dev_dbg(obj
->dev
, "%s\t[%02d] %08x\n", __func__
, i
, p
[i
]);
293 BUG_ON(p
[0] != IOMMU_ARCH_VERSION
);
296 static void omap2_iommu_restore_ctx(struct iommu
*obj
)
301 for (i
= 0; i
< (MMU_REG_SIZE
/ sizeof(u32
)); i
++) {
302 iommu_write_reg(obj
, p
[i
], i
* sizeof(u32
));
303 dev_dbg(obj
->dev
, "%s\t[%02d] %08x\n", __func__
, i
, p
[i
]);
306 BUG_ON(p
[0] != IOMMU_ARCH_VERSION
);
309 static void omap2_cr_to_e(struct cr_regs
*cr
, struct iotlb_entry
*e
)
311 e
->da
= cr
->cam
& MMU_CAM_VATAG_MASK
;
312 e
->pa
= cr
->ram
& MMU_RAM_PADDR_MASK
;
313 e
->valid
= cr
->cam
& MMU_CAM_V
;
314 e
->pgsz
= cr
->cam
& MMU_CAM_PGSZ_MASK
;
315 e
->endian
= cr
->ram
& MMU_RAM_ENDIAN_MASK
;
316 e
->elsz
= cr
->ram
& MMU_RAM_ELSZ_MASK
;
317 e
->mixed
= cr
->ram
& MMU_RAM_MIXED
;
320 static const struct iommu_functions omap2_iommu_ops
= {
321 .version
= IOMMU_ARCH_VERSION
,
323 .enable
= omap2_iommu_enable
,
324 .disable
= omap2_iommu_disable
,
325 .set_twl
= omap2_iommu_set_twl
,
326 .fault_isr
= omap2_iommu_fault_isr
,
328 .tlb_read_cr
= omap2_tlb_read_cr
,
329 .tlb_load_cr
= omap2_tlb_load_cr
,
331 .cr_to_e
= omap2_cr_to_e
,
332 .cr_to_virt
= omap2_cr_to_virt
,
333 .alloc_cr
= omap2_alloc_cr
,
334 .cr_valid
= omap2_cr_valid
,
335 .dump_cr
= omap2_dump_cr
,
337 .get_pte_attr
= omap2_get_pte_attr
,
339 .save_ctx
= omap2_iommu_save_ctx
,
340 .restore_ctx
= omap2_iommu_restore_ctx
,
341 .dump_ctx
= omap2_iommu_dump_ctx
,
344 static int __init
omap2_iommu_init(void)
346 return install_iommu_arch(&omap2_iommu_ops
);
348 module_init(omap2_iommu_init
);
350 static void __exit
omap2_iommu_exit(void)
352 uninstall_iommu_arch(&omap2_iommu_ops
);
354 module_exit(omap2_iommu_exit
);
356 MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
357 MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
358 MODULE_LICENSE("GPL v2");