arch/arm/mach-omap2/iommu2.c

/*
 * omap iommu: omap2/3 architecture specific functions
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stringify.h>

#include <plat/iommu.h>

/*
 * omap2 architecture specific register bit definitions
 */
#define IOMMU_ARCH_VERSION	0x00000011

/* SYSCONF */
#define MMU_SYS_IDLE_SHIFT	3
#define MMU_SYS_IDLE_FORCE	(0 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_NONE	(1 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_SMART	(2 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_MASK	(3 << MMU_SYS_IDLE_SHIFT)

#define MMU_SYS_SOFTRESET	(1 << 1)
#define MMU_SYS_AUTOIDLE	1

/* SYSSTATUS */
#define MMU_SYS_RESETDONE	1

/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT	(1 << 4)
#define MMU_IRQ_TABLEWALKFAULT	(1 << 3)
#define MMU_IRQ_EMUMISS		(1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT	(1 << 1)
#define MMU_IRQ_TLBMISS		(1 << 0)

#define __MMU_IRQ_FAULT		\
	(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK		\
	(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK	(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK	(__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)

/* MMU_CNTL */
#define MMU_CNTL_SHIFT		1
#define MMU_CNTL_MASK		(7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB	(1 << 3)
#define MMU_CNTL_TWL_EN		(1 << 2)
#define MMU_CNTL_MMU_EN		(1 << 1)

#define get_cam_va_mask(pgsz)				\
	(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_1M)  ? 0xfff00000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_4K)  ? 0xfffff000 : 0)
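
/*
 * Enable the MMU and, optionally, hardware table walking (TWL).  With
 * TWL off only the MMU itself is enabled, and the IRQ mask is switched
 * so that TLB misses raise an interrupt instead of table walk faults.
 */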
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}
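
/*
 * Soft-reset the MMU and wait (up to 20ms) for the reset to complete,
 * report the hardware revision, program smart-idle/autoidle and the
 * page table base address (TTB), then enable translation with hardware
 * table walk.  The L1 page table must be 16KB aligned.
 */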
static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;
	unsigned long timeout;

	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);

	timeout = jiffies + msecs_to_jiffies(20);
	do {
		l = iommu_read_reg(obj, MMU_SYSSTATUS);
		if (l & MMU_SYS_RESETDONE)
			break;
	} while (!time_after(jiffies, timeout));

	if (!(l & MMU_SYS_RESETDONE)) {
		dev_err(obj->dev, "can't take mmu out of reset\n");
		return -ENODEV;
	}

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	l = iommu_read_reg(obj, MMU_SYSCONFIG);
	l &= ~MMU_SYS_IDLE_MASK;
	l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
	iommu_write_reg(obj, l, MMU_SYSCONFIG);

	iommu_write_reg(obj, pa, MMU_TTB);

	__iommu_set_twl(obj, true);

	return 0;
}
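
/* Clear the MMU enable bits and force the module into idle. */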
static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
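
/*
 * Note: the 'on' argument is ignored here; this callback always
 * disables hardware table walk.
 */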
static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
{
	__iommu_set_twl(obj, false);
}
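
/*
 * Read and acknowledge MMU_IRQSTATUS, returning the faulting device
 * address via *ra and a bitmask of OMAP_IOMMU_ERR_* codes (0 if no
 * fault is pending).
 */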
static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
{
	u32 stat, da;
	u32 errs = 0;

	stat = iommu_read_reg(obj, MMU_IRQSTATUS);
	stat &= MMU_IRQ_MASK;
	if (!stat) {
		*ra = 0;
		return 0;
	}

	da = iommu_read_reg(obj, MMU_FAULT_AD);
	*ra = da;

	if (stat & MMU_IRQ_TLBMISS)
		errs |= OMAP_IOMMU_ERR_TLB_MISS;
	if (stat & MMU_IRQ_TRANSLATIONFAULT)
		errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
	if (stat & MMU_IRQ_EMUMISS)
		errs |= OMAP_IOMMU_ERR_EMU_MISS;
	if (stat & MMU_IRQ_TABLEWALKFAULT)
		errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
	if (stat & MMU_IRQ_MULTIHITFAULT)
		errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
	iommu_write_reg(obj, stat, MMU_IRQSTATUS);

	return errs;
}
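
/* Read back the CAM/RAM pair of the currently selected TLB entry. */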
static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
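
/* Load a CAM/RAM pair into the current TLB entry, forcing the valid bit. */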
static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);
}
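
/* Extract the virtual address tag from a CAM word, masked by its page size. */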
static u32 omap2_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}
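
/*
 * Build a CAM/RAM pair from an iotlb_entry.  The device address must
 * be aligned to the entry's page size.
 */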
static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

static inline int omap2_cr_valid(struct cr_regs *cr)
{
	return cr->cam & MMU_CAM_V;
}
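
/*
 * Convert an entry's endianness/element-size/mixed attributes into PTE
 * attribute bits; the value is shifted up by six for section and
 * supersection (1MB/16MB) page sizes.
 */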
static u32 omap2_get_pte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
		   (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}

static ssize_t
omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
{
	char *p = buf;

	/* FIXME: Need more detail analysis of cam/ram */
	p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
		     (cr->cam & MMU_CAM_P) ? 1 : 0);

	return p - buf;
}
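
/*
 * pr_reg() - print one MMU register as "name: value" into the dump
 * buffer; bail out to the 'out' label once fewer than 32 bytes remain.
 */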
#define pr_reg(name)							\
	do {								\
		ssize_t bytes;						\
		const char *str = "%20s: %08x\n";			\
		const int maxcol = 32;					\
		bytes = snprintf(p, maxcol, str, __stringify(name),	\
				 iommu_read_reg(obj, MMU_##name));	\
		p += bytes;						\
		len -= bytes;						\
		if (len < maxcol)					\
			goto out;					\
	} while (0)

static ssize_t
omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
{
	char *p = buf;

	pr_reg(REVISION);
	pr_reg(SYSCONFIG);
	pr_reg(SYSSTATUS);
	pr_reg(IRQSTATUS);
	pr_reg(IRQENABLE);
	pr_reg(WALKING_ST);
	pr_reg(CNTL);
	pr_reg(FAULT_AD);
	pr_reg(TTB);
	pr_reg(LOCK);
	pr_reg(LD_TLB);
	pr_reg(CAM);
	pr_reg(RAM);
	pr_reg(GFLUSH);
	pr_reg(FLUSH_ENTRY);
	pr_reg(READ_CAM);
	pr_reg(READ_RAM);
	pr_reg(EMU_FAULT_AD);
out:
	return p - buf;
}
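
/* Save the first MMU_REG_SIZE bytes of the register file into obj->ctx. */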
static void omap2_iommu_save_ctx(struct omap_iommu *obj)
{
	int i;
	u32 *p = obj->ctx;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		p[i] = iommu_read_reg(obj, i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}

	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}
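
/* Write the register context saved in obj->ctx back to the MMU. */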
static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
{
	int i;
	u32 *p = obj->ctx;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		iommu_write_reg(obj, p[i], i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}

	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}
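
/* Decode a CAM/RAM pair back into an iotlb_entry. */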
static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	e->da		= cr->cam & MMU_CAM_VATAG_MASK;
	e->pa		= cr->ram & MMU_RAM_PADDR_MASK;
	e->valid	= cr->cam & MMU_CAM_V;
	e->pgsz		= cr->cam & MMU_CAM_PGSZ_MASK;
	e->endian	= cr->ram & MMU_RAM_ENDIAN_MASK;
	e->elsz		= cr->ram & MMU_RAM_ELSZ_MASK;
	e->mixed	= cr->ram & MMU_RAM_MIXED;
}
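
/*
 * omap2/3 specific callbacks, registered with the generic OMAP iommu
 * layer by omap_install_iommu_arch() at module init.
 */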
static const struct iommu_functions omap2_iommu_ops = {
	.version	= IOMMU_ARCH_VERSION,

	.enable		= omap2_iommu_enable,
	.disable	= omap2_iommu_disable,
	.set_twl	= omap2_iommu_set_twl,
	.fault_isr	= omap2_iommu_fault_isr,

	.tlb_read_cr	= omap2_tlb_read_cr,
	.tlb_load_cr	= omap2_tlb_load_cr,

	.cr_to_e	= omap2_cr_to_e,
	.cr_to_virt	= omap2_cr_to_virt,
	.alloc_cr	= omap2_alloc_cr,
	.cr_valid	= omap2_cr_valid,
	.dump_cr	= omap2_dump_cr,

	.get_pte_attr	= omap2_get_pte_attr,

	.save_ctx	= omap2_iommu_save_ctx,
	.restore_ctx	= omap2_iommu_restore_ctx,
	.dump_ctx	= omap2_iommu_dump_ctx,
};

static int __init omap2_iommu_init(void)
{
	return omap_install_iommu_arch(&omap2_iommu_ops);
}
module_init(omap2_iommu_init);

static void __exit omap2_iommu_exit(void)
{
	omap_uninstall_iommu_arch(&omap2_iommu_ops);
}
module_exit(omap2_iommu_exit);

MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
MODULE_LICENSE("GPL v2");