/*
 * Logic that manipulates the Xtensa MMU. Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

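/*
 * The two flush-all helpers below walk every way and every auto-refill
 * entry of the ITLB/DTLB. Each entry is invalidated with the *_no_isync
 * variant, so one isync after the loop is enough to make every
 * invalidation visible, instead of paying for an isync per entry.
 */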
static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);

			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);

			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

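/* Drop the entire contents of both TLBs. This is also the heavyweight
 * fallback used by local_flush_tlb_kernel_range() below when a range is
 * too large to be worth flushing page by page.
 */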
void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries. If mm is someone else's user
 * mapping, we invalidate the context; when that user mapping is swapped
 * in, a new context will be assigned to it.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		mm->context.asid[cpu] = NO_CONTEXT;
	}
}

#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

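/*
 * _TLB_ENTRIES is the page capacity of the larger auto-refill TLB and is
 * the break-even point used below: ranges covering at most that many
 * pages are flushed page by page, larger ones drop the whole context.
 * Worked example (the numbers are core-specific, shown for illustration
 * only): with 4 ways and an ENTRIES_LOG2 of 2, the TLB holds
 * 4 << 2 == 16 entries, so ranges up to 16 pages take the per-page path.
 */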
void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
		 (unsigned long)mm->context.asid[cpu], start, end);
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}

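/* Presumably invoked by core mm code after the PTEs for nr pages starting
 * at address have been updated (an assumption about the caller, not taken
 * from this file); reusing the range flush drops any stale TLB entries.
 */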
void update_mmu_tlb_range(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr)
{
	local_flush_tlb_range(vma, address, address + PAGE_SIZE * nr);
}

#ifdef CONFIG_DEBUG_TLB_SANITY

static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pteval;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none_or_clear_bad(p4d))
		return 0;
	pud = pud_offset(p4d, vaddr);
	if (pud_none_or_clear_bad(pud))
		return 0;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	pteval = pte_val(*pte);
	pte_unmap(pte);
	return pteval;
}

/* Severity flags returned by check_tlb_entry() below. */
enum {
	TLB_SUSPICIOUS = 1,
	TLB_INSANE = 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or the
 * PTE is marked as non-present. A non-present PTE on a page with non-zero
 * refcount and zero mapcount is normal for a batched TLB flush operation.
 * Zero refcount means that the page was freed prematurely. Non-zero
 * mapcount is unusual, but does not necessarily mean an error, and is
 * therefore only marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned r1 = dtlb ?
		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
		       dtlb ? 'D' : 'I', w, e, vpn,
		       kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				struct folio *f = page_folio(p);

				pr_err("folio refcount: %d, mapcount: %d\n",
				       folio_ref_count(f), folio_mapcount(f));
				if (!folio_ref_count(f))
					rc |= TLB_INSANE;
				else if (folio_mapped(f))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}

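/* Walk every way and entry of both TLBs with interrupts disabled, OR-ing
 * the per-entry verdicts together, then escalate once at the end: insane
 * findings are fatal, suspicious ones only warn.
 */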
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */