// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>

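/*
 * Page attributes are changed by walking the kernel page tables and applying
 * a pair of protection masks to each entry: bits in clear_mask are cleared
 * first, then bits in set_mask are set.
 */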
struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

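/*
 * Per-level callbacks for the page-table walk. Leaf entries at the
 * PGD/P4D/PUD/PMD levels (large mappings) are rewritten in place; PTEs are
 * always rewritten.
 */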
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = READ_ONCE(*pgd);

	if (pgd_leaf(val)) {
		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
		set_pgd(pgd, val);
	}

	return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = READ_ONCE(*p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = READ_ONCE(*pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = READ_ONCE(*pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = READ_ONCE(*pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}

static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
};

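/*
 * Apply the masks to every mapping in [addr, addr + numpages * PAGE_SIZE)
 * while holding init_mm's mmap lock for read, then flush the TLB for the
 * range.
 */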
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_read_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);
	mmap_read_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);

	return ret;
}

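/* Convenience wrappers selecting the set/clear masks for common changes. */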
int set_memory_rw_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(_PAGE_EXEC));
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

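/*
 * Make the linear-map page for @page invalid (clear _PAGE_PRESENT) or restore
 * the default kernel protections. Neither helper flushes the TLB, as the
 * _noflush suffix indicates.
 */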
int set_direct_map_invalid_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(_PAGE_PRESENT)
	};

	mmap_read_lock(&init_mm);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	mmap_read_unlock(&init_mm);

	return ret;
}

int set_direct_map_default_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = PAGE_KERNEL,
		.clear_mask = __pgprot(0)
	};

	mmap_read_lock(&init_mm);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	mmap_read_unlock(&init_mm);

	return ret;
}

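/*
 * For CONFIG_DEBUG_PAGEALLOC: map or unmap pages in the linear mapping by
 * toggling _PAGE_PRESENT when the debug_pagealloc facility is enabled.
 */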
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	if (enable)
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(_PAGE_PRESENT), __pgprot(0));
	else
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(0), __pgprot(_PAGE_PRESENT));
}
#endif

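/*
 * Report whether @page is mapped in the kernel page tables by walking from
 * the PGD down to the PTE level.
 */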
bool kernel_page_present(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgd_t *pgd;
	pud_t *pud;
	p4d_t *p4d;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (!pgd_present(*pgd))
		return false;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return false;

	pud = pud_offset(p4d, addr);
	if (!pud_present(*pud))
		return false;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return false;

	pte = pte_offset_kernel(pmd, addr);
	return pte_present(*pte);
}