/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table.
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs.
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);		// finish unmap for address space MM
 */
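/*
 * A rough caller's-eye sketch of the template above (illustrative only; the
 * real driver lives in the generic mm code, and tlb_gather_mmu()/
 * tlb_finish_mmu() are assumed to have their usual (tlb, mm, start, end) and
 * (tlb, start, end) signatures):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	tlb_remove_tlb_entry(&tlb, ptep, address);	// per removed PTE
 *	tlb_remove_page(&tlb, page);			// per normal page
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */
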
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define IA64_GATHER_BUNDLE	8
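/*
 * The mmu_gather structure batches up the pages freed during an unmap so that
 * they are only released after the corresponding TLB entries have been flushed
 * (steps (3) and (4) of the procedure described at the top of this file).
 */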
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            nr;
        unsigned int            max;
        unsigned char           fullmm;         /* non-zero means full mm flush */
        unsigned char           need_flush;     /* really unmapped some PTEs? */
        unsigned long           start, end;
        unsigned long           start_addr;
        unsigned long           end_addr;
        struct page             **pages;
        struct page             *local[IA64_GATHER_BUNDLE];
};
struct ia64_tr_entry {
        u64 ifa;
        u64 itir;
        u64 pte;
        u64 rr;
}; /*Record for tr entry!*/
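/*
 * Helpers for pinned translations (assumption: "itr"/"ptr" here refer to
 * inserting and purging ia64 translation-register entries for the register
 * set selected by target_mask).
 */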
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
 region register macros
*/
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	((val >> 8) & 0xffffff)
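/*
 * As encoded by the macros above, a region register value carries the "ve"
 * bit in bit 0, the preferred page size in bits 2-7 and the 24-bit region id
 * (RID) in bits 8-31; RR_TO_RID(), for example, simply extracts those RID bits.
 */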
static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        tlb->need_flush = 0;

        if (tlb->fullmm) {
                /*
                 * Tearing down the entire address space.  This happens both as a result
                 * of exit() and execve().  The latter case necessitates the call to
                 * flush_tlb_mm() here.
                 */
                flush_tlb_mm(tlb->mm);
        } else if (unlikely (end - start >= 1024*1024*1024*1024UL
                             || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
        {
                /*
                 * If we flush more than a tera-byte or across regions, we're probably
                 * better off just flushing the entire TLB(s).  This should be very rare
                 * and is not worth optimizing for.
                 */
                flush_tlb_all();
        } else {
                /*
                 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
                 * vma pointer.
                 */
                struct vm_area_struct vma;

                vma.vm_mm = tlb->mm;
                /* flush the address range from the tlb: */
                flush_tlb_range(&vma, start, end);
                /* now flush the virt. page-table area mapping the address range: */
                flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
        }
}
static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        unsigned long i;
        unsigned int nr;

        /* lastly, release the freed pages */
        nr = tlb->nr;

        tlb->nr = 0;
        tlb->start_addr = ~0UL;
        for (i = 0; i < nr; ++i)
                free_page_and_swap_cache(tlb->pages[i]);
}
/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (!tlb->need_flush)
                return;
        ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
        ia64_tlb_flush_mmu_free(tlb);
}
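/*
 * Try to upgrade from the small on-stack bundle to a full page of page
 * pointers; if the allocation fails we simply keep using tlb->local.
 */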
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
        unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

        if (addr) {
                tlb->pages = (void *)addr;
                tlb->max = PAGE_SIZE / sizeof(void *);
        }
}
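/*
 * fullmm is set only when start == 0 && end == ~0UL, i.e. when the entire
 * address space is being torn down, since !(start | (end + 1)) is non-zero
 * exactly in that case.
 */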
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                    unsigned long start, unsigned long end)
{
        tlb->mm = mm;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
        tlb->nr = 0;
        tlb->fullmm = !(start | (end + 1));
        tlb->start = start;
        tlb->end = end;
        tlb->start_addr = ~0UL;
}
/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
                    unsigned long start, unsigned long end, bool force)
{
        if (force)
                tlb->need_flush = 1;
        /*
         * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
         * tlb->end_addr.
         */
        ia64_tlb_flush_mmu(tlb, start, end);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        if (tlb->pages != tlb->local)
                free_pages((unsigned long)tlb->pages, 0);
}
/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->need_flush = 1;

        if (!tlb->nr && tlb->pages == tlb->local)
                __tlb_alloc_page(tlb);

        tlb->pages[tlb->nr++] = page;
        VM_WARN_ON(tlb->nr > tlb->max);
        if (tlb->nr == tlb->max)
                return true;    /* batch is full; the caller needs to flush */
        return false;
}
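/*
 * The argument-less wrappers below operate on the PTE range recorded by
 * __tlb_remove_tlb_entry() in tlb->start_addr/tlb->end_addr.
 */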
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}
static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        ia64_tlb_flush_mmu_free(tlb);
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
        ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        if (__tlb_remove_page(tlb, page))
                tlb_flush_mmu(tlb);
}
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                          struct page *page, int page_size)
{
        return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        return tlb_remove_page(tlb, page);
}
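/*
 * Note that the *_size variants above ignore page_size: pages are gathered in
 * the same array regardless of their size.
 */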
/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
        if (tlb->start_addr == ~0UL)
                tlb->start_addr = address;
        tlb->end_addr = address + PAGE_SIZE;
}
#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)
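/*
 * No per-VMA setup or teardown is needed on ia64: the flush is driven entirely
 * by the address range gathered above, so these hooks are empty.
 */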
#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
                                                     unsigned int page_size)
{
}
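/*
 * The page-table freeing macros below force need_flush, guaranteeing that a
 * TLB flush happens before tlb_finish_mmu() completes, and then hand the
 * page-table page to the corresponding generic __p*_free_tlb() helper.
 */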
#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */