// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
#include <asm/pte-walk.h>

#include <trace/events/thp.h>
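
/*
 * Each CPU keeps a small batch of pending hash invalidations (defined just
 * below) while it runs in lazy MMU mode, so that flush_hash_range() can
 * process the whole set at once instead of flushing every page separately.
 */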
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i, offset;

	i = batch->index;

	/*
	 * Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
		if (unlikely(psize == MMU_PAGE_16G))
			offset = PTRS_PER_PUD;
		else
			offset = PTRS_PER_PMD;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/*
		 * Mask the address for the standard page size. If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table.
		 */
		addr &= PAGE_MASK;
		offset = PTRS_PER_PTE;
	}

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep, offset);
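
	/*
	 * At this point vpn and rpte identify the hash slot(s) that back
	 * this linux PTE; they are what flush_hash_page() acts on.
	 */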

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return.
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
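	/*
	 * Flush eagerly once the batch is full rather than waiting for the
	 * caller to leave lazy MMU mode.
	 */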
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i, local;

	i = batch->index;
	local = mm_is_thread_local(batch->mm);
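	/*
	 * A single-entry batch can be flushed directly; anything larger
	 * goes through flush_hash_range().
	 */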
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

void hash__tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/*
	 * If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(unsigned long start, unsigned long end)
{
	int hugepage_shift;
	unsigned long flags;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
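	/*
	 * Walk the range one base page at a time. find_init_mm_pte() also
	 * reports the huge page shift of the mapping, which is passed
	 * straight through as the "huge" argument of hpte_need_flush().
	 */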
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & H_PAGE_HASHPTE))
			continue;
		hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
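
/*
 * Flush the hash table entries covering all linux PTEs mapped under a single
 * PMD, without modifying the PTEs themselves. Used, for example, when the
 * THP code collapses a PMD range and the old small-page hash entries have to
 * be removed.
 */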
void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = ALIGN_DOWN(addr, PMD_SIZE);
	/*
	 * Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
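	/*
	 * Walk every PTE slot under this PMD and queue a flush for each
	 * entry that has actually been inserted into the hash table.
	 */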
	start_pte = pte_offset_map(pmd, addr);
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);
		if (pteval & H_PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}