// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>

struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

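/*
 * Apply the walk's private masks to a raw page table entry value: bits in
 * clear_mask are cleared first, then bits in set_mask are set.
 */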
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

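/*
 * Page walk callbacks: the p4d/pud/pmd handlers only rewrite leaf (huge)
 * entries and leave table entries untouched so the walk can descend to the
 * next level; the pte handler rewrites every PTE it visits.
 */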
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = p4dp_get(p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = pudp_get(pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = pmdp_get(pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = ptep_get(pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}

static const struct mm_walk_ops pageattr_ops = {
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
	.walk_lock = PGWALK_RDLOCK,
};

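/*
 * The __split_linear_mapping_*() helpers below walk the linear mapping and
 * break huge leaf entries down one level at a time (p4d -> pud -> pmd ->
 * pte) so that permissions can be changed on part of a huge mapping.
 */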
#ifdef CONFIG_64BIT
static int __split_linear_mapping_pmd(pud_t *pudp,
				      unsigned long vaddr, unsigned long end)
{
	pmd_t *pmdp;
	unsigned long next;

	pmdp = pmd_offset(pudp, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (next - vaddr >= PMD_SIZE &&
		    vaddr <= (vaddr & PMD_MASK) && end >= next)
			continue;

		if (pmd_leaf(pmdp_get(pmdp))) {
			struct page *pte_page;
			unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
			pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
			pte_t *ptep_new;
			int i;

			pte_page = alloc_page(GFP_KERNEL);
			if (!pte_page)
				return -ENOMEM;

			ptep_new = (pte_t *)page_address(pte_page);
			for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
				set_pte(ptep_new, pfn_pte(pfn + i, prot));

			smp_wmb();

			set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
		}
	} while (pmdp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_pud(p4d_t *p4dp,
				      unsigned long vaddr, unsigned long end)
{
	pud_t *pudp;
	unsigned long next;
	int ret;

	pudp = pud_offset(p4dp, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (next - vaddr >= PUD_SIZE &&
		    vaddr <= (vaddr & PUD_MASK) && end >= next)
			continue;

		if (pud_leaf(pudp_get(pudp))) {
			struct page *pmd_page;
			unsigned long pfn = _pud_pfn(pudp_get(pudp));
			pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
			pmd_t *pmdp_new;
			int i;

			pmd_page = alloc_page(GFP_KERNEL);
			if (!pmd_page)
				return -ENOMEM;

			pmdp_new = (pmd_t *)page_address(pmd_page);
			for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
				set_pmd(pmdp_new,
					pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));

			smp_wmb();

			set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
		}

		ret = __split_linear_mapping_pmd(pudp, vaddr, next);
		if (ret)
			return ret;
	} while (pudp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_p4d(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp;
	unsigned long next;
	int ret;

	p4dp = p4d_offset(pgdp, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		/*
		 * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't
		 * need to split, we'll change the protections on the whole P4D.
		 */
		if (next - vaddr >= P4D_SIZE &&
		    vaddr <= (vaddr & P4D_MASK) && end >= next)
			continue;

		if (p4d_leaf(p4dp_get(p4dp))) {
			struct page *pud_page;
			unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
			pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
			pud_t *pudp_new;
			int i;

			pud_page = alloc_page(GFP_KERNEL);
			if (!pud_page)
				return -ENOMEM;

			/*
			 * Fill the pud level with leaf puds that have the same
			 * protections as the leaf p4d.
			 */
			pudp_new = (pud_t *)page_address(pud_page);
			for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
				set_pud(pudp_new,
					pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));

			/*
			 * Make sure the pud filling is not reordered with the
			 * p4d store which could result in seeing a partially
			 * filled pud level.
			 */
			smp_wmb();

			set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
		}

		ret = __split_linear_mapping_pud(p4dp, vaddr, next);
		if (ret)
			return ret;
	} while (p4dp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_pgd(pgd_t *pgdp,
				      unsigned long vaddr,
				      unsigned long end)
{
	unsigned long next;
	int ret;

	do {
		next = pgd_addr_end(vaddr, end);
		/* We never use PGD mappings for the linear mapping */
		ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
		if (ret)
			return ret;
	} while (pgdp++, vaddr = next, vaddr != end);

	return 0;
}

static int split_linear_mapping(unsigned long start, unsigned long end)
{
	return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
}
#endif	/* CONFIG_64BIT */

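/*
 * Change the protections of the range [addr, addr + numpages * PAGE_SIZE)
 * under init_mm's mmap lock. On 64-bit kernels the same change is applied
 * to the linear mapping alias of the range, splitting huge mappings first
 * when only part of one is affected.
 */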
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	unsigned long __maybe_unused lm_start;
	unsigned long __maybe_unused lm_end;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_write_lock(&init_mm);

#ifdef CONFIG_64BIT
	/*
	 * We are about to change the permissions of a kernel mapping, we must
	 * apply the same changes to its linear mapping alias, which may imply
	 * splitting a huge mapping.
	 */

	if (is_vmalloc_or_module_addr((void *)start)) {
		struct vm_struct *area = NULL;
		int i, page_start;

		area = find_vm_area((void *)start);
		page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;

		for (i = page_start; i < page_start + numpages; ++i) {
			lm_start = (unsigned long)page_address(area->pages[i]);
			lm_end = lm_start + PAGE_SIZE;

			ret = split_linear_mapping(lm_start, lm_end);
			if (ret)
				goto unlock;

			ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
						    &pageattr_ops, NULL, &masks);
			if (ret)
				goto unlock;
		}
	} else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
		if (is_kernel_mapping(start)) {
			lm_start = (unsigned long)lm_alias(start);
			lm_end = (unsigned long)lm_alias(end);
		} else {
			lm_start = start;
			lm_end = end;
		}

		ret = split_linear_mapping(lm_start, lm_end);
		if (ret)
			goto unlock;

		ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
					    &pageattr_ops, NULL, &masks);
		if (ret)
			goto unlock;
	}

	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);

unlock:
	mmap_write_unlock(&init_mm);

	/*
	 * We can't use flush_tlb_kernel_range() here as we may have split a
	 * hugepage that is larger than that, so let's flush everything.
	 */
	flush_tlb_all();
#else
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);

	mmap_write_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);
#endif

	return ret;
}

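/* Wrappers around __set_memory() that adjust R/W/X on kernel mappings. */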
int set_memory_rw_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(_PAGE_EXEC));
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

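/*
 * set_direct_map_*(): make a single page in the direct map not present, or
 * restore its default PAGE_KERNEL protections.
 */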
int set_direct_map_invalid_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_address(page), 1,
			    __pgprot(0), __pgprot(_PAGE_PRESENT));
}

int set_direct_map_default_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_address(page), 1,
			    PAGE_KERNEL, __pgprot(_PAGE_EXEC));
}

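/*
 * DEBUG_PAGEALLOC: map and unmap pages in the linear mapping by setting or
 * clearing _PAGE_PRESENT directly on their PTEs.
 */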
#ifdef CONFIG_DEBUG_PAGEALLOC
static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
{
	int enable = *(int *)data;

	unsigned long val = pte_val(ptep_get(pte));

	if (enable)
		val |= _PAGE_PRESENT;
	else
		val &= ~_PAGE_PRESENT;

	set_pte(pte, __pte(val));

	return 0;
}

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	unsigned long start = (unsigned long)page_address(page);
	unsigned long size = PAGE_SIZE * numpages;

	apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);

	flush_tlb_kernel_range(start, start + size);
}
#endif

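/*
 * Report whether @page is currently mapped in the kernel page tables; a
 * leaf entry at any level counts as present.
 */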
bool kernel_page_present(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (!pgd_present(pgdp_get(pgd)))
		return false;
	if (pgd_leaf(pgdp_get(pgd)))
		return true;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(p4dp_get(p4d)))
		return false;
	if (p4d_leaf(p4dp_get(p4d)))
		return true;

	pud = pud_offset(p4d, addr);
	if (!pud_present(pudp_get(pud)))
		return false;
	if (pud_leaf(pudp_get(pud)))
		return true;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(pmdp_get(pmd)))
		return false;
	if (pmd_leaf(pmdp_get(pmd)))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_present(ptep_get(pte));
}