/*
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

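/*
 * Auto-refill (ARF) TLB entries are addressed by packing the way number into
 * the low bits and the entry index at PAGE_SHIFT and above, which is why the
 * flush helpers below compute e = w + (i << PAGE_SHIFT) before handing the
 * result to the invalidate instructions.
 */
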
static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);

			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);

			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

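/* Invalidate every auto-refill entry in both the instruction and data TLBs. */
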
void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries. If mm is someone else's user
 * mapping, we invalidate the context; when that user mapping is switched in,
 * a new context will be assigned to it.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		/* Not the active mm: just forget its ASID; activate_context()
		 * will allocate a fresh one the next time it is switched in.
		 */
		mm->context.asid[cpu] = NO_CONTEXT;
	}
}

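/*
 * _TLB_ENTRIES is the larger of the two auto-refill TLB sizes, in entries.
 * It bounds the number of pages the range-flush fast path below is willing
 * to invalidate one by one.
 */
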
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

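/*
 * Flush a user address range. If the range covers no more pages than the TLB
 * has entries, invalidate it page by page under the target mm's ASID;
 * otherwise it is cheaper to drop the mm's context entirely via
 * local_flush_tlb_mm().
 */
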
void local_flush_tlb_range(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
		 (unsigned long)mm->context.asid[cpu], start, end);
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC) {
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		} else {
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		}
		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

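/*
 * Flush a single user page. The RASID register is temporarily switched to
 * the target mm's ASID so that invalidate_{i,d}tlb_mapping() probe the right
 * address space, then restored.
 */
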
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

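/*
 * Flush a kernel address range. Only the page-granular kernel region between
 * TASK_SIZE and PAGE_OFFSET (e.g. vmalloc space) is invalidated page by
 * page, and only if the range fits in the TLB; anything else falls back to a
 * full TLB flush.
 */
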
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}

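/*
 * Everything below is a debug-only consistency checker for the TLB contents,
 * compiled in with CONFIG_DEBUG_TLB_SANITY.
 */
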
#ifdef CONFIG_DEBUG_TLB_SANITY

/* Walk the current task's page table and return the raw PTE for vaddr,
 * or 0 if no mapping exists at any level.
 */
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none_or_clear_bad(p4d))
		return 0;
	pud = pud_offset(p4d, vaddr);
	if (pud_none_or_clear_bad(pud))
		return 0;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	return pte_val(*pte);
}

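/*
 * Severity flags returned by check_tlb_entry() and OR-ed together by
 * check_tlb_sanity(). The exact values are assumed here; any two distinct
 * bits would work.
 */
enum {
	TLB_SUSPICIOUS	= 1,
	TLB_INSANE	= 2,
};
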
static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or the PTE
 * is marked as non-present. A non-present PTE and a page with non-zero
 * refcount and zero mapcount is normal for a batched TLB flush operation.
 * Zero refcount means that the page was freed prematurely. Non-zero mapcount
 * is unusual, but does not necessarily mean an error, so it is marked as
 * suspicious.
 */

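/*
 * Example: a DTLB entry tagged with ASID 1 (kernel) whose VPN lies below
 * TASK_SIZE is reported as insane, as is a user-ASID entry whose VPN lies in
 * kernel space.
 */
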
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned r1 = dtlb ?
		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);

				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}

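/*
 * Walk every way and entry of both auto-refill TLBs with interrupts off,
 * calling tlb_insane() if any entry was insane and tlb_suspicious() if any
 * was merely suspicious.
 */
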
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */