/*
 * linux/arch/arm/mach-omap2/mmu.c
 *
 * Support for non-MPU OMAP2 MMUs.
 *
 * Copyright (C) 2002-2007 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <paul.mundt@nokia.com>
 *
 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/err.h>

#include <asm/arch/mmu.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

static void *dspvect_page;
#define DSP_INIT_PAGE	0xfff000
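
/*
 * A TLB entry is programmed through a CAM/RAM register pair: CAM
 * carries the virtual-address tag, page size and the preserved/valid
 * bits, while RAM carries the physical address, endianness and element
 * size.  The two helpers below shuttle a struct cam_ram_regset between
 * those registers.
 */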
static void
omap2_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        cr->cam = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM);
        cr->ram = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM);
}

static void
omap2_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        /* Set the CAM and RAM entries */
        omap_mmu_write_reg(mmu, cr->cam | OMAP_MMU_CAM_V, OMAP_MMU_CAM);
        omap_mmu_write_reg(mmu, cr->ram, OMAP_MMU_RAM);
}
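
/*
 * Map a single 4KB page of ARM peripheral space into the DSP's I/O
 * window.  The DSP-side address is IOMAP_VAL << 18 plus the DSP I/O
 * address shifted left by one (the I/O space appears to be addressed
 * in 16-bit words).  The page is entered in the ARM MMU as well as in
 * the DSP page table, as a preserved entry so it can never be evicted.
 */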
static void exmap_setup_iomap_page(struct omap_mmu *mmu, unsigned long phys,
                                   unsigned long dsp_io_adr, int index)
{
        unsigned long dspadr;
        void *virt;
        struct omap_mmu_tlb_entry tlb_ent;

        dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
        virt = omap_mmu_to_virt(mmu, dspadr);
        exmap_set_armmmu(mmu, (unsigned long)virt, phys, PAGE_SIZE);
        INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, NULL, virt);
        INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
        omap_mmu_load_pte_entry(mmu, &tlb_ent);
}

static void exmap_clear_iomap_page(struct omap_mmu *mmu,
                                   unsigned long dsp_io_adr)
{
        unsigned long dspadr;
        void *virt;

        dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
        virt = omap_mmu_to_virt(mmu, dspadr);
        exmap_clear_armmmu(mmu, (unsigned long)virt, PAGE_SIZE);
        /* The DSP MMU is shutting down; the DSP-side entry is not
         * handled here. */
}

#define OMAP24XX_MAILBOX_BASE	(L4_24XX_BASE + 0x94000)
#define OMAP2420_GPT5_BASE	(L4_24XX_BASE + 0x7c000)
#define OMAP2420_GPT6_BASE	(L4_24XX_BASE + 0x7e000)
#define OMAP2420_GPT7_BASE	(L4_24XX_BASE + 0x80000)
#define OMAP2420_GPT8_BASE	(L4_24XX_BASE + 0x82000)
#define OMAP24XX_EAC_BASE	(L4_24XX_BASE + 0x90000)
#define OMAP24XX_STI_BASE	(L4_24XX_BASE + 0x68000)
#define OMAP24XX_STI_CH_BASE	(L4_24XX_BASE + 0x0c000000)
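
/*
 * Install the preserved (locked) TLB entries the DSP must always be
 * able to reach without faulting: its init/vector page, the PRCM and
 * mailbox registers, and on OMAP2420 also GPT5-GPT8, the EAC and the
 * STI trace channels.  Returns the number of entries used; the core
 * stores it in mmu->nr_exmap_preserved.
 */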
static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
{
        int i, n = 0;

        exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);

        /* REVISIT: This will need to be revisited for 3430 */
        exmap_setup_iomap_page(mmu, OMAP2_PRCM_BASE, 0x7000, n++);
        exmap_setup_iomap_page(mmu, OMAP24XX_MAILBOX_BASE, 0x11000, n++);

        if (cpu_is_omap2420()) {
                exmap_setup_iomap_page(mmu, OMAP2420_GPT5_BASE, 0xe000, n++);
                exmap_setup_iomap_page(mmu, OMAP2420_GPT6_BASE, 0xe800, n++);
                exmap_setup_iomap_page(mmu, OMAP2420_GPT7_BASE, 0xf000, n++);
                exmap_setup_iomap_page(mmu, OMAP2420_GPT8_BASE, 0xf800, n++);
                exmap_setup_iomap_page(mmu, OMAP24XX_EAC_BASE, 0x10000, n++);
                exmap_setup_iomap_page(mmu, OMAP24XX_STI_BASE, 0xc800, n++);
                for (i = 0; i < 5; i++)
                        exmap_setup_preserved_mem_page(mmu,
                                __va(OMAP24XX_STI_CH_BASE + i * SZ_4K),
                                0xfb0000 + i * SZ_4K, n++);
        }

        return n;
}

static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
{
        int i;

        exmap_clear_iomap_page(mmu, 0x7000);            /* PRCM registers */
        exmap_clear_iomap_page(mmu, 0x11000);           /* MAILBOX registers */

        if (cpu_is_omap2420()) {
                exmap_clear_iomap_page(mmu, 0xe000);    /* GPT5 */
                exmap_clear_iomap_page(mmu, 0xe800);    /* GPT6 */
                exmap_clear_iomap_page(mmu, 0xf000);    /* GPT7 */
                exmap_clear_iomap_page(mmu, 0xf800);    /* GPT8 */
                exmap_clear_iomap_page(mmu, 0x10000);   /* EAC */
                exmap_clear_iomap_page(mmu, 0xc800);    /* STI */
                for (i = 0; i < 5; i++)                 /* STI CH */
                        exmap_clear_mem_page(mmu, 0xfb0000 + i * SZ_4K);
        }

        exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
}
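
/*
 * Fault sources that raise an interrupt.  Note that
 * OMAP_MMU_IRQ_TLBMISS is deliberately left out of the mask: a TLB
 * miss is presumably resolved by the hardware table walker from the
 * page table, so only genuine faults are unmasked.
 */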
#define MMU_IRQ_MASK \
        (OMAP_MMU_IRQ_MULTIHITFAULT | \
         OMAP_MMU_IRQ_TABLEWALKFAULT | \
         OMAP_MMU_IRQ_EMUMISS | \
         OMAP_MMU_IRQ_TRANSLATIONFAULT)
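
/*
 * Bring the MMU up: report the hardware revision, allocate one
 * DMA-able page for the DSP vector table, pin the preserved mappings
 * and unmask the fault interrupts.
 */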
static int omap2_mmu_startup(struct omap_mmu *mmu)
{
        u32 rev = omap_mmu_read_reg(mmu, OMAP_MMU_REVISION);

        pr_info("MMU: OMAP %s MMU initialized (HW v%d.%d)\n", mmu->name,
                (rev >> 4) & 0xf, rev & 0xf);

        dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
        if (dspvect_page == NULL) {
                dev_err(mmu->dev,
                        "MMU %s: failed to allocate memory for vector table\n",
                        mmu->name);
                return -ENOMEM;
        }

        mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);

        omap_mmu_write_reg(mmu, MMU_IRQ_MASK, OMAP_MMU_IRQENABLE);

        return 0;
}
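
/*
 * Tear-down mirrors startup.  The stale kernel mapping of the vector
 * page is flushed from the TLB before the page is freed; exmap_sem is
 * held so the exmap table cannot change underneath.
 */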
static void omap2_mmu_shutdown(struct omap_mmu *mmu)
{
        exmap_clear_preserved_entries(mmu);

        if (dspvect_page != NULL) {
                unsigned long virt;

                down_read(&mmu->exmap_sem);

                virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
                flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
                free_page((unsigned long)dspvect_page);
                dspvect_page = NULL;

                up_read(&mmu->exmap_sem);
        }
}
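
/*
 * Dump the whole TLB for sysfs.  The lock register's victim pointer is
 * stepped across every entry so each CAM/RAM pair can be read back and
 * decoded into one human-readable row per entry.
 */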
static ssize_t omap2_mmu_show(struct omap_mmu *mmu, char *buf,
                              struct omap_mmu_tlb_lock *tlb_lock)
{
        int i, len;

        len = sprintf(buf, "P: preserved, V: valid\n"
                           "B: big endian, L: little endian, "
                           "M: mixed page attribute\n"
                           "ety P V size   cam_va     ram_pa E ES M\n");
                         /* 00: P V  4KB 0x300000 0x10171800 B 16 M */

        for (i = 0; i < mmu->nr_tlb_entries; i++) {
                struct omap_mmu_tlb_entry ent;
                struct cam_ram_regset cr;
                struct omap_mmu_tlb_lock entry_lock;
                char *pgsz_str, *elsz_str;

                /* read a TLB entry */
                entry_lock.base   = tlb_lock->base;
                entry_lock.victim = i;
                omap_mmu_read_tlb(mmu, &entry_lock, &cr);

                ent.pgsz   = cr.cam & OMAP_MMU_CAM_PAGESIZE_MASK;
                ent.prsvd  = cr.cam & OMAP_MMU_CAM_P;
                ent.valid  = cr.cam & OMAP_MMU_CAM_V;
                ent.va     = cr.cam & OMAP_MMU_CAM_VATAG_MASK;
                ent.endian = cr.ram & OMAP_MMU_RAM_ENDIANNESS;
                ent.elsz   = cr.ram & OMAP_MMU_RAM_ELEMENTSIZE_MASK;
                ent.pa     = cr.ram & OMAP_MMU_RAM_PADDR_MASK;
                ent.mixed  = cr.ram & OMAP_MMU_RAM_MIXED;

                pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_16MB) ? "16MB" :
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB" :
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB" :
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB" :
                                                                      " ???";
                elsz_str = (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_8)  ? " 8" :
                           (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_16) ? "16" :
                           (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_32) ? "32" :
                                                                       "??";

                if (i == tlb_lock->base)
                        len += sprintf(buf + len, "lock base = %d\n",
                                       tlb_lock->base);
                if (i == tlb_lock->victim)
                        len += sprintf(buf + len, "victim    = %d\n",
                                       tlb_lock->victim);

                len += sprintf(buf + len,
                               /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
                               "%02d: %c %c %s 0x%06lx 0x%08lx %c %s %c\n",
                               i,
                               ent.prsvd ? 'P' : ' ',
                               ent.valid ? 'V' : ' ',
                               pgsz_str, ent.va, ent.pa,
                               ent.endian ? 'B' : 'L',
                               elsz_str,
                               ent.mixed ? 'M' : ' ');
        }

        return len;
}

#define get_cam_va_mask(pgsz) \
        (((pgsz) == OMAP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_1MB)  ? 0xfff00000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_4KB)  ? 0xfffff000 : 0)

static inline unsigned long omap2_mmu_cam_va(struct cam_ram_regset *cr)
{
        unsigned int page_size = cr->cam & OMAP_MMU_CAM_PAGESIZE_MASK;
        unsigned int mask = get_cam_va_mask(page_size);

        return cr->cam & mask;
}
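
/*
 * Precompute the CAM/RAM register pair for a mapping request.  The
 * virtual address must be aligned to the requested page size, since
 * the low bits of the tag are consumed by the page offset.
 */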
static struct cam_ram_regset *
omap2_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
        struct cam_ram_regset *cr;

        if (entry->va & ~get_cam_va_mask(entry->pgsz)) {
                dev_err(mmu->dev,
                        "MMU %s: mapping vadr (0x%06lx) is not on an aligned boundary\n",
                        mmu->name, entry->va);
                return ERR_PTR(-EINVAL);
        }

        cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
        if (cr == NULL)
                return ERR_PTR(-ENOMEM);

        cr->cam = (entry->va & OMAP_MMU_CAM_VATAG_MASK) |
                  entry->prsvd | entry->pgsz;
        cr->ram = entry->pa | entry->endian | entry->elsz;

        return cr;
}

static inline int omap2_mmu_cam_ram_valid(struct cam_ram_regset *cr)
{
        return cr->cam & OMAP_MMU_CAM_V;
}
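
/*
 * Fault handler: log which fault bits fired and the faulting address,
 * disable the MMU and ack the status bits right away, then defer the
 * actual recovery to process context via mmu->irq_work.
 */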
static void omap2_mmu_interrupt(struct omap_mmu *mmu)
{
        unsigned long status, va;

        status = MMU_IRQ_MASK & omap_mmu_read_reg(mmu, OMAP_MMU_IRQSTATUS);
        va = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD);

        pr_info("%s\n", (status & OMAP_MMU_IRQ_MULTIHITFAULT) ?
                "multi hit" : "");
        pr_info("%s\n", (status & OMAP_MMU_IRQ_TABLEWALKFAULT) ?
                "table walk fault" : "");
        pr_info("%s\n", (status & OMAP_MMU_IRQ_EMUMISS) ?
                "EMU miss" : "");
        pr_info("%s\n", (status & OMAP_MMU_IRQ_TRANSLATIONFAULT) ?
                "translation fault" : "");
        pr_info("%s\n", (status & OMAP_MMU_IRQ_TLBMISS) ?
                "TLB miss" : "");
        pr_info("fault address = %#08lx\n", va);

        omap_mmu_disable(mmu);
        omap_mmu_write_reg(mmu, status, OMAP_MMU_IRQSTATUS);

        mmu->fault_address = va;
        schedule_work(&mmu->irq_work);
}
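
/*
 * Pack the entry attributes into the attribute bits of a DSP PTE:
 * bit 5 takes the mixed-page flag, the low bits take endianness and
 * element size, and for anything larger than a 4KB page the whole
 * field is shifted up by six bits, presumably to match the attribute
 * position in a section descriptor.
 */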
static pgprot_t omap2_mmu_pte_get_attr(struct omap_mmu_tlb_entry *entry)
{
        u32 attr;

        attr = entry->mixed << 5;
        attr |= entry->endian;
        attr |= entry->elsz >> 3;
        attr <<= ((entry->pgsz & OMAP_MMU_CAM_PAGESIZE_4KB) ? 0 : 6);

        return attr;
}

struct omap_mmu_ops omap2_mmu_ops = {
        .startup        = omap2_mmu_startup,
        .shutdown       = omap2_mmu_shutdown,
        .read_tlb       = omap2_mmu_read_tlb,
        .load_tlb       = omap2_mmu_load_tlb,
        .show           = omap2_mmu_show,
        .cam_va         = omap2_mmu_cam_va,
        .cam_ram_alloc  = omap2_mmu_cam_ram_alloc,
        .cam_ram_valid  = omap2_mmu_cam_ram_valid,
        .interrupt      = omap2_mmu_interrupt,
        .pte_get_attr   = omap2_mmu_pte_get_attr,
};
EXPORT_SYMBOL_GPL(omap2_mmu_ops);

MODULE_LICENSE("GPL");