// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>

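/*
 * Bits to set and clear in each page table entry visited by the walk.
 * Passed through mm_walk->private to the callbacks below.
 */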
struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

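/* Apply the walk's clear mask, then its set mask, to a raw page table entry value. */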
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

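/*
 * The pgd/p4d/pud/pmd callbacks only rewrite an entry when it is a leaf
 * (a large-page mapping at that level); non-leaf entries point to lower
 * level tables and must be left untouched.
 */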
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = READ_ONCE(*pgd);

	if (pgd_leaf(val)) {
		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
		set_pgd(pgd, val);
	}

	return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = READ_ONCE(*p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = READ_ONCE(*pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = READ_ONCE(*pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

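/* PTEs are always leaf entries, so update them unconditionally. */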
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = READ_ONCE(*pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}

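/* One callback per page table level, so a mapping at any level is seen. */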
static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
};

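/*
 * Walk [addr, addr + numpages * PAGE_SIZE) in init_mm, applying the given
 * set/clear masks to every mapping, then flush the affected TLB range.
 */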
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	down_read(&init_mm.mmap_sem);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);
	up_read(&init_mm.mmap_sem);

	flush_tlb_kernel_range(start, end);

	return ret;
}

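/*
 * set_memory_* helpers: adjust the R/W/X permission bits on a range of
 * kernel pages. An illustrative caller that makes a freshly written code
 * buffer read-only and executable might do:
 *
 *	set_memory_ro(addr, numpages);
 *	set_memory_x(addr, numpages);
 */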
int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

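/*
 * Remove a page from the kernel direct map by clearing _PAGE_PRESENT.
 * "noflush" means the caller is responsible for flushing the TLB.
 */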
int set_direct_map_invalid_noflush(struct page *page)
{
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(_PAGE_PRESENT)
	};

	return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
}

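/* Put a page back into the direct map with the default kernel protections. */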
int set_direct_map_default_noflush(struct page *page)
{
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = PAGE_KERNEL,
		.clear_mask = __pgprot(0)
	};

	return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
}

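/*
 * DEBUG_PAGEALLOC hook: map pages back in (enable) or unmap them by
 * toggling _PAGE_PRESENT, so stray accesses to freed pages fault.
 * __set_memory() also performs the TLB flush for the range.
 */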
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	if (enable)
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(_PAGE_PRESENT), __pgprot(0));
	else
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(0), __pgprot(_PAGE_PRESENT));
}
#endif