// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <linux/pgtable.h>
#include <linux/uaccess.h>
/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
	unsigned long i, j, addr;
	u32 **p;

	if (!spt)
		return;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
	kfree(spt);
}
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return;
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}
/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	mmap_write_lock(mm);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt)
		goto err_out;

	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}

err_out:
	mmap_write_unlock(mm);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	split_huge_pmd(vma, pmd, addr);

	return 0;
}

static const struct mm_walk_ops subpage_walk_ops = {
	.pmd_entry	= subpage_walk_pmd_entry,
	.walk_lock	= PGWALK_WRLOCK_VERIFY,
};

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	/*
	 * We don't try too hard, we just mark all the vma in that range
	 * VM_NOHUGEPAGE and split them.
	 */
	for_each_vma_range(vmi, vma, addr + len) {
		vm_flags_set(vma, VM_NOHUGEPAGE);
		walk_page_vma(vma, &subpage_walk_ops, NULL);
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
}
#endif
/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 * (See the illustrative user-space sketch after this function.)
 */
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
		unsigned long, len, u32 __user *, map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	if (radix_enabled())
		return -ENOENT;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	mmap_write_lock(mm);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt) {
		/*
		 * Allocate subpage prot table if not already done.
		 * Do this with mmap_lock held
		 */
		spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
		if (!spt) {
			err = -ENOMEM;
			goto out;
		}
		mm->context.hash_context->spt = spt;
	}

	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		mmap_write_unlock(mm);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		mmap_write_lock(mm);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	mmap_write_unlock(mm);
	return err;
}
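/*
 * A minimal user-space sketch of driving this syscall, assuming the
 * platform defines __NR_subpage_prot and the range is mapped with 64k
 * pages; the exact assignment of 2-bit fields within each u32 to 4k
 * subpages is decided by the hash fault handler and is not spelled out
 * here.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// One u32 of protection bits per 64k page in [addr, addr + len).
 *	static long set_subpage_prot(unsigned long addr, unsigned long len,
 *				     unsigned int *map)
 *	{
 *		return syscall(__NR_subpage_prot, addr, len, map);
 *	}
 *
 *	// Passing map == NULL clears any existing subpage protection:
 *	//	set_subpage_prot(addr, len, NULL);
 */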