/*
 * linux/arch/arm/mach-omap1/mmu.c
 *
 * Support for non-MPU OMAP1 MMUs.
 *
 * Copyright (C) 2002-2005 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <paul.mundt@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/err.h>

#include "mmu.h"

#include <asm/tlbflush.h>
#include <mach/dsp_common.h>
static void *dspvect_page;
#define DSP_INIT_PAGE	0xfff000
#define MMUFAULT_MASK (OMAP_MMU_FAULT_ST_PERM |\
		       OMAP_MMU_FAULT_ST_TLB_MISS |\
		       OMAP_MMU_FAULT_ST_TRANS)
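
/*
 * Fault status bits that omap1_mmu_interrupt() below treats as genuine
 * faults: permission faults, TLB misses and translation faults.  Any
 * other status (e.g. a prefetch error) is only logged at debug level
 * and the interrupt is re-enabled.
 */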
static unsigned int get_cam_l_va_mask(u16 pgsz)
{
	switch (pgsz) {
	case OMAP_MMU_CAM_PAGESIZE_1MB:
		return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
		       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
	case OMAP_MMU_CAM_PAGESIZE_64KB:
		return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
		       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
	case OMAP_MMU_CAM_PAGESIZE_4KB:
		return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
		       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
	case OMAP_MMU_CAM_PAGESIZE_1KB:
		return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
		       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
	}
	return 0;
}
#define get_cam_va_mask(pgsz) \
	((u32)OMAP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
	 (u32)get_cam_l_va_mask(pgsz) << 6)
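
/*
 * The CAM registers split the virtual address tag in two: CAM_H holds
 * VA[31:22] and CAM_L holds the page-size dependent part of VA[21:6].
 * See how ent.va is reassembled from cam_h/cam_l in omap1_mmu_show()
 * and omap1_mmu_cam_va() below.
 */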
static int intmem_usecount;

/* for safety */
void dsp_mem_usecount_clear(void)
{
	if (intmem_usecount != 0) {
		printk(KERN_WARNING
		       "MMU: unbalanced memory request/release detected.\n"
		       "     intmem_usecount should be zero here; "
		       "resetting it to zero.\n");
		intmem_usecount = 0;
		omap_dsp_release_mem();
	}
}
EXPORT_SYMBOL_GPL(dsp_mem_usecount_clear);
void omap_mmu_itack(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, OMAP_MMU_IT_ACK_IT_ACK, OMAP_MMU_IT_ACK);
}
EXPORT_SYMBOL(omap_mmu_itack);
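
/*
 * mem_enable()/mem_disable() keep a use count of mappings that live in
 * DSP-internal memory, so omap_dsp_request_mem() is issued for the
 * first user and omap_dsp_release_mem() for the last one.  The pair
 * must be balanced; dsp_mem_usecount_clear() above is the safety net.
 */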
static int omap1_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
	int ret = 0;

	if (omap_mmu_internal_memory(mmu, addr)) {
		if (intmem_usecount++ == 0)
			ret = omap_dsp_request_mem();
	}

	return ret;
}
static int omap1_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
	int ret = 0;

	if (omap_mmu_internal_memory(mmu, addr)) {
		if (--intmem_usecount == 0)
			omap_dsp_release_mem();
	} else
		ret = -EIO;

	return ret;
}
static inline void
omap1_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	/* read a TLB entry */
	omap_mmu_write_reg(mmu, OMAP_MMU_LD_TLB_RD, OMAP_MMU_LD_TLB);

	cr->cam_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_H);
	cr->cam_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_L);
	cr->ram_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_H);
	cr->ram_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_L);
}
static inline void
omap1_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	/* Set the CAM and RAM entries */
	omap_mmu_write_reg(mmu, cr->cam_h, OMAP_MMU_CAM_H);
	omap_mmu_write_reg(mmu, cr->cam_l, OMAP_MMU_CAM_L);
	omap_mmu_write_reg(mmu, cr->ram_h, OMAP_MMU_RAM_H);
	omap_mmu_write_reg(mmu, cr->ram_l, OMAP_MMU_RAM_L);
}
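
/*
 * Note: which TLB entry is read or loaded is selected beforehand by the
 * caller through the TLB lock/victim register (see the entry_lock
 * handling in omap1_mmu_show() below).  The read path strobes
 * OMAP_MMU_LD_TLB_RD and then fetches the READ_CAM/RAM registers; for
 * the load path only the CAM/RAM registers are filled here, and the
 * actual load strobe is presumably issued by the shared omap-mmu layer
 * that calls these ops.
 */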
static ssize_t omap1_mmu_show(struct omap_mmu *mmu, char *buf,
			      struct omap_mmu_tlb_lock *tlb_lock)
{
	int i, len;

	len = sprintf(buf, "P: preserved, V: valid\n"
			   "ety P V size   cam_va     ram_pa   ap\n");
	/*	      00: P V  4KB 0x300000 0x10171800 FA */

	for (i = 0; i < mmu->nr_tlb_entries; i++) {
		struct omap_mmu_tlb_entry ent;
		struct cam_ram_regset cr;
		struct omap_mmu_tlb_lock entry_lock;
		char *pgsz_str, *ap_str;

		/* read a TLB entry */
		entry_lock.base   = tlb_lock->base;
		entry_lock.victim = i;
		omap_mmu_read_tlb(mmu, &entry_lock, &cr);

		ent.pgsz  = cr.cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;
		ent.prsvd = cr.cam_l & OMAP_MMU_CAM_P;
		ent.valid = cr.cam_l & OMAP_MMU_CAM_V;
		ent.ap    = cr.ram_l & OMAP_MMU_RAM_L_AP_MASK;
		ent.va    = (u32)(cr.cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
			    (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
		ent.pa    = (unsigned long)cr.ram_h << 16 |
			    (cr.ram_l & OMAP_MMU_RAM_L_RAM_LSB_MASK);

		pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB" :
			   (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB" :
			   (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB" :
			   (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1KB)  ? " 1KB" :
								      " ???";
		ap_str = (ent.ap == OMAP_MMU_RAM_L_AP_RO) ? "RO" :
			 (ent.ap == OMAP_MMU_RAM_L_AP_FA) ? "FA" :
			 (ent.ap == OMAP_MMU_RAM_L_AP_NA) ? "NA" :
							    "??";

		if (i == tlb_lock->base)
			len += sprintf(buf + len, "lock base = %d\n",
				       tlb_lock->base);
		if (i == tlb_lock->victim)
			len += sprintf(buf + len, "victim = %d\n",
				       tlb_lock->victim);

		len += sprintf(buf + len,
			       /* 00: P V  4KB 0x300000 0x10171800 FA */
			       "%02d: %c %c %s 0x%06lx 0x%08lx %s\n",
			       i,
			       ent.prsvd ? 'P' : ' ',
			       ent.valid ? 'V' : ' ',
			       pgsz_str, ent.va, ent.pa, ap_str);
	}

	return len;
}
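
/*
 * A single preserved ("locked") TLB entry pins the DSP vector table
 * page at DSP_INIT_PAGE (0xfff000 in DSP address space), presumably so
 * that vector fetches can never take a TLB miss.  The page itself is
 * allocated in omap1_mmu_startup() below.
 */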
static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
{
	int n = 0;

	exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);

	return n;
}
static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
{
	exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
}
static int omap1_mmu_startup(struct omap_mmu *mmu)
{
	dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
	if (dspvect_page == NULL) {
		dev_err(mmu->dev, "MMU %s: failed to allocate memory "
			"for vector table\n", mmu->name);
		return -ENOMEM;
	}

	mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);

	return 0;
}
static void omap1_mmu_shutdown(struct omap_mmu *mmu)
{
	exmap_clear_preserved_entries(mmu);

	if (dspvect_page != NULL) {
		unsigned long virt;

		down_read(&mmu->exmap_sem);

		virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
		flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
		free_page((unsigned long)dspvect_page);
		dspvect_page = NULL;

		up_read(&mmu->exmap_sem);
	}
}
static inline unsigned long omap1_mmu_cam_va(struct cam_ram_regset *cr)
{
	unsigned int page_size = cr->cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;

	return (u32)(cr->cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
	       (u32)(cr->cam_l & get_cam_l_va_mask(page_size)) << 6;
}
static struct cam_ram_regset *
omap1_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
	struct cam_ram_regset *cr;

	if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
		dev_err(mmu->dev, "MMU %s: mapping vadr (0x%06lx) is not on"
			" an aligned boundary\n", mmu->name, entry->va);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
	if (cr == NULL)
		return ERR_PTR(-ENOMEM);

	cr->cam_h = entry->va >> 22;
	cr->cam_l = (entry->va >> 6 & get_cam_l_va_mask(entry->pgsz)) |
		    entry->prsvd | entry->pgsz;
	cr->ram_h = entry->pa >> 16;
	cr->ram_l = (entry->pa & OMAP_MMU_RAM_L_RAM_LSB_MASK) | entry->ap;

	return cr;
}
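
/*
 * Worked example, matching the sample row in omap1_mmu_show()
 * ("00: P V  4KB 0x300000 0x10171800 FA"): for va = 0x300000 and
 * pa = 0x10171800 with a 4KB page, cam_h = va >> 22 = 0x0,
 * cam_l = (va >> 6 = 0xC000) masked to the 4KB VA tag and ORed with
 * the preserved and page-size bits, ram_h = pa >> 16 = 0x1017, and
 * ram_l = the OMAP_MMU_RAM_L_RAM_LSB_MASK part of pa (0x1800) ORed
 * with the FA access-permission bits.
 */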
static inline int omap1_mmu_cam_ram_valid(struct cam_ram_regset *cr)
{
	return cr->cam_l & OMAP_MMU_CAM_V;
}
static void omap1_mmu_interrupt(struct omap_mmu *mmu)
{
	unsigned long status;
	unsigned long adh, adl;
	unsigned long dp;
	unsigned long va;

	status = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_ST);
	adh = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD_H);
	adl = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD_L);
	dp = adh & OMAP_MMU_FAULT_AD_H_DP;
	va = (((adh & OMAP_MMU_FAULT_AD_H_ADR_MASK) << 16) | adl);

	/* if the fault is masked, nothing to do */
	if ((status & MMUFAULT_MASK) == 0) {
		pr_debug("MMU interrupt, but ignoring.\n");
		/*
		 * note: in OMAP1710,
		 * when CACHE + DMA domain gets out of idle in DSP,
		 * MMU interrupt occurs but MMU_FAULT_ST is not set.
		 * in this case, we just ignore the interrupt.
		 */
		if (status) {
			pr_debug("%s%s%s%s\n",
				 (status & OMAP_MMU_FAULT_ST_PREF) ?
					" (prefetch err)" : "",
				 (status & OMAP_MMU_FAULT_ST_PERM) ?
					" (permission fault)" : "",
				 (status & OMAP_MMU_FAULT_ST_TLB_MISS) ?
					" (TLB miss)" : "",
				 (status & OMAP_MMU_FAULT_ST_TRANS) ?
					" (translation fault)" : "");
			pr_debug("fault address = %#08lx\n", va);
		}
		enable_irq(mmu->irq);
		return;
	}

	pr_info("%s%s%s%s\n",
		(status & OMAP_MMU_FAULT_ST_PREF) ?
			((MMUFAULT_MASK & OMAP_MMU_FAULT_ST_PREF) ?
				" prefetch err" :
				" (prefetch err)") :
			"",
		(status & OMAP_MMU_FAULT_ST_PERM) ?
			((MMUFAULT_MASK & OMAP_MMU_FAULT_ST_PERM) ?
				" permission fault" :
				" (permission fault)") :
			"",
		(status & OMAP_MMU_FAULT_ST_TLB_MISS) ?
			((MMUFAULT_MASK & OMAP_MMU_FAULT_ST_TLB_MISS) ?
				" TLB miss" :
				" (TLB miss)") :
			"",
		(status & OMAP_MMU_FAULT_ST_TRANS) ?
			((MMUFAULT_MASK & OMAP_MMU_FAULT_ST_TRANS) ?
				" translation fault" :
				" (translation fault)") :
			"");
	pr_info("fault address = %#08lx\n", va);

	mmu->fault_address = va;
	schedule_work(&mmu->irq_work);
}
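
/*
 * Faults that survive MMUFAULT_MASK are reported and then deferred to
 * process context: the fault address is stashed in mmu->fault_address
 * and mmu->irq_work does the actual servicing.  Masked statuses only
 * re-enable the IRQ, per the OMAP1710 behaviour noted above.
 */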
static pgprot_t omap1_mmu_pte_get_attr(struct omap_mmu_tlb_entry *entry)
{
	/* 4KB AP position as default */
	u32 attr = entry->ap >> 4;

	attr <<= ((entry->pgsz == OMAP_MMU_CAM_PAGESIZE_1MB) ? 6 : 0);

	return attr;
}
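
/*
 * The shift above folds the RAM_L access-permission field into the AP
 * bit position of an ARM page-table descriptor: the small-page (4KB)
 * position by default, or six bits higher for a 1MB section, which is
 * consistent with the ARM first-level descriptor keeping AP at bits
 * 11:10 and the second-level small-page descriptor at bits 5:4.  The
 * exact source position depends on the OMAP_MMU_RAM_L_AP_* definitions
 * in mmu.h.
 */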
struct omap_mmu_ops omap1_mmu_ops = {
	.startup	= omap1_mmu_startup,
	.shutdown	= omap1_mmu_shutdown,
	.mem_enable	= omap1_mmu_mem_enable,
	.mem_disable	= omap1_mmu_mem_disable,
	.read_tlb	= omap1_mmu_read_tlb,
	.load_tlb	= omap1_mmu_load_tlb,
	.show		= omap1_mmu_show,
	.cam_va		= omap1_mmu_cam_va,
	.cam_ram_alloc	= omap1_mmu_cam_ram_alloc,
	.cam_ram_valid	= omap1_mmu_cam_ram_valid,
	.interrupt	= omap1_mmu_interrupt,
	.pte_get_attr	= omap1_mmu_pte_get_attr,
};
EXPORT_SYMBOL_GPL(omap1_mmu_ops);