// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>
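
/*
 * Synchronise a PTE with the access/dirty/permission flags requested by the
 * fault handler.  Without Svvptc the TLB maintenance is left to
 * update_mmu_cache(), which runs unconditionally; with Svvptc (the patched
 * jump below) the flush is done here instead, and only when the PTE actually
 * changed.
 */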
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
                 : : : : svvptc);

        if (!pte_same(ptep_get(ptep), entry))
                __set_pte_at(vma->vm_mm, ptep, entry);
        /*
         * update_mmu_cache will unconditionally execute, handling both
         * the case that the PTE changed and the spurious fault case.
         */
        return true;

svvptc:
        if (!pte_same(ptep_get(ptep), entry)) {
                __set_pte_at(vma->vm_mm, ptep, entry);
                /* Only cores without Svadu are impacted here */
                flush_tlb_page(vma, address);
                return true;
        }

        return false;
}
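
/*
 * Test and clear the accessed bit of a PTE.  The bit is cleared with an
 * atomic bit operation directly on the page-table entry; no TLB flush is
 * performed here.
 */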
int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address,
                              pte_t *ptep)
{
        if (!pte_young(ptep_get(ptep)))
                return 0;
        return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
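
/*
 * RISC-V picks the number of page-table levels (Sv39/Sv48/Sv57) at boot, so
 * these walkers must check at run time whether the PUD and P4D levels are
 * actually in use or folded into the level above.
 */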
#ifdef CONFIG_64BIT
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
        if (pgtable_l4_enabled)
                return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

        return (pud_t *)p4d;
}

p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
        if (pgtable_l5_enabled)
                return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

        return (p4d_t *)pgd;
}
#endif

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
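/*
 * Huge vmalloc/ioremap support.  P4D-level huge mappings are not supported:
 * p4d_set_huge() always reports failure and p4d_clear_huge() is a no-op.
 */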
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}
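
/* Install a leaf (huge) PUD entry mapping @phys with protections @prot. */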
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
        pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

        set_pud(pud, new_pud);
        return 1;
}

int pud_clear_huge(pud_t *pud)
{
        if (!pud_leaf(pudp_get(pud)))
                return 0;
        pud_clear(pud);
        return 1;
}
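
/*
 * Free the PMD table hanging off a PUD so the range can be remapped with a
 * huge entry: clear the PUD, flush the covered kernel range, free any PTE
 * pages still linked below, then free the PMD page itself.
 */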
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        pmd_t *pmd = pud_pgtable(pudp_get(pud));
        int i;

        pud_clear(pud);

        flush_tlb_kernel_range(addr, addr + PUD_SIZE);

        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_none(pmd[i])) {
                        pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

                        pte_free_kernel(NULL, pte);
                }
        }

        pmd_free(NULL, pmd);

        return 1;
}
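
/* Install a leaf (huge) PMD entry mapping @phys with protections @prot. */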
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
        pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

        set_pmd(pmd, new_pmd);
        return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
        if (!pmd_leaf(pmdp_get(pmd)))
                return 0;
        pmd_clear(pmd);
        return 1;
}
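
/*
 * Free the PTE page below a PMD: clear the PMD, flush the covered kernel
 * range, then release the page-table page.
 */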
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

        pmd_clear(pmd);

        flush_tlb_kernel_range(addr, addr + PMD_SIZE);
        pte_free_kernel(NULL, pte);
        return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
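/*
 * Clear a PMD that currently points to a PTE table so that the huge page
 * collapse code can install a huge mapping, and return the old PMD value.
 * The comment below explains why a full-mm flush is needed on RISC-V.
 */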
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp)
{
        pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
        /*
         * When leaf PTE entries (regular pages) are collapsed into a leaf
         * PMD entry (huge page), a valid non-leaf PTE is converted into a
         * valid leaf PTE at the level 1 page table.  Since the sfence.vma
         * forms that specify an address only apply to leaf PTEs, we need a
         * global flush here.  collapse_huge_page() assumes these flushes are
         * eager, so just do the fence here.
         */
        flush_tlb_mm(vma->vm_mm);
        return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */