// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */
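/* Each CPU keeps one pending-flush batch.  struct tlb_batch itself is
 * declared in asm/tlbflush_64.h; going by the fields used below it is
 * roughly (see the header for the authoritative definition):
 *
 *	struct tlb_batch {
 *		unsigned int hugepage_shift;
 *		struct mm_struct *mm;
 *		unsigned long tlb_nr;
 *		unsigned long active;
 *		unsigned long vaddrs[TLB_BATCH_NR];
 *	};
 */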
static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
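/* Drain this CPU's batch: flush the TSB entries for the queued addresses
 * and, if the mm still owns a valid hardware context, demap them from the
 * TLB (a single-page flush for a batch of one, otherwise a bulk flush,
 * cross-call based when CONFIG_SMP is set).
 */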
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}
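/* Lazy MMU mode brackets batched page table updates.  While tb->active is
 * set, tlb_batch_add_one() queues addresses instead of flushing right
 * away; leaving lazy mode drains whatever is still pending.
 */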
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}
void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}
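/* Queue one user virtual address for a deferred flush.  Bit 0 of the
 * queued address records whether the mapping was executable, so the
 * low-level flush code can tell executable mappings apart.  A batch covers
 * a single mm and a single hugepage shift; a mismatch, a full batch, or
 * not being in lazy MMU mode at all means flushing immediately instead.
 */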
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
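/* Entry point called from the pte update path when a previously valid user
 * mapping is replaced.  On pre-hypervisor chips the D-cache is virtually
 * indexed, so a dirty page whose kernel and user addresses differ in the
 * cache color bit (bit 13) must have its D-cache lines flushed before the
 * translation disappears; afterwards the address is queued unless this is
 * a full-mm teardown, which is flushed wholesale elsewhere.
 */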
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;
		struct folio *folio;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		folio = page_folio(page);
		mapping = folio_flush_mapping(folio);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_folio_all(mm, folio);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
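/* The pmd being replaced pointed at a page table rather than a huge page:
 * walk the base-page ptes underneath it and queue a flush for every one
 * that is still valid.
 */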
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	if (!pte)
		return;
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte - 1);
}
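/* Accounting and flushing shared by all pmd updates.  The thp/hugetlb pte
 * counters are what the TSB code later consults when deciding whether a
 * huge-page TSB is needed, and any translation that was live under the old
 * pmd is queued for a flush.  A transparent hugepage spans two
 * REAL_HPAGE_SIZE hardware translations, hence the two batch entries.
 */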
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_pmd(pmd))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_pmd(orig))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}
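/* Plain pmd store plus the accounting/flush work above. */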
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}
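/* Install a new pmd value atomically with cmpxchg64() and return the old
 * one, performing the same accounting as set_pmd_at() for the transition.
 */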
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}
/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t old, entry;

	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_pmd(entry))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}
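/* THP deposit/withdraw: park a spare pte page on the pmd so that a later
 * split of the huge pmd cannot fail for lack of memory.  The spare pages
 * are chained through their own storage, with the first two pte slots
 * reused as a struct list_head.
 */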
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
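/* Hand one deposited pte page back, clearing the two pte slots that were
 * used as the embedded list_head.
 */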
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */