/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code   <alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;
        int target_node = NUMA_NO_NODE;

        /*
         * Can be called with only the mmap_sem for reading by
         * prot_numa so we must check the pmd isn't constantly
         * changing from under us from pmd_none to pmd_trans_huge
         * and/or the other way around.
         */
        if (pmd_trans_unstable(pmd))
                return 0;

        /*
         * The pmd points to a regular pte so the pmd can't change
         * from under us even if the mmap_sem is only held for
         * reading.
         */
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

        /* Get target node for single threaded private VMAs */
        if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
            atomic_read(&vma->vm_mm->mm_users) == 1)
                target_node = numa_node_id();

        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;
                        bool preserve_write = prot_numa && pte_write(oldpte);

                        /*
                         * Avoid trapping faults against the zero or KSM
                         * pages. See similar comment in change_huge_pmd.
                         */
                        if (prot_numa) {
                                struct page *page;

                                page = vm_normal_page(vma, addr, oldpte);
                                if (!page || PageKsm(page))
                                        continue;

                                /* Avoid TLB flush if possible */
                                if (pte_protnone(oldpte))
                                        continue;

                                /*
                                 * Don't mess with PTEs if page is already on
                                 * the node a single-threaded process is
                                 * running on.
                                 */
                                if (target_node == page_to_nid(page))
                                        continue;
                        }

                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);
                        if (preserve_write)
                                ptent = pte_mk_savedwrite(ptent);

                        /* Avoid taking write faults for known dirty pages */
                        if (dirty_accountable && pte_dirty(ptent) &&
                                        (pte_soft_dirty(ptent) ||
                                         !(vma->vm_flags & VM_SOFTDIRTY))) {
                                ptent = pte_mkwrite(ptent);
                        }
                        ptep_modify_prot_commit(mm, addr, pte, ptent);
                        pages++;
                } else if (IS_ENABLED(CONFIG_MIGRATION)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                pte_t newpte;

                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
                                newpte = swp_entry_to_pte(entry);
                                if (pte_swp_soft_dirty(oldpte))
                                        newpte = pte_swp_mksoft_dirty(newpte);
                                set_pte_at(mm, addr, pte, newpte);

                                pages++;
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);

        return pages;
}

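/*
 * Walk the PMD entries under one PUD. A huge PMD is either changed in one
 * go by change_huge_pmd() or split first and then handled PTE by PTE; the
 * MMU notifier range is only opened once the first populated PMD is found.
 */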
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                pud_t *pud, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pmd_t *pmd;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long next;
        unsigned long pages = 0;
        unsigned long nr_huge_updates = 0;
        unsigned long mni_start = 0;

        pmd = pmd_offset(pud, addr);
        do {
                unsigned long this_pages;

                next = pmd_addr_end(addr, end);
                if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
                                && pmd_none_or_clear_bad(pmd))
                        continue;

                /* invoke the mmu notifier if the pmd is populated */
                if (!mni_start) {
                        mni_start = addr;
                        mmu_notifier_invalidate_range_start(mm, mni_start, end);
                }

                if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE) {
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
                        } else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
                                                newprot, prot_numa);

                                if (nr_ptes) {
                                        if (nr_ptes == HPAGE_PMD_NR) {
                                                pages += HPAGE_PMD_NR;
                                                nr_huge_updates++;
                                        }

                                        /* huge pmd was handled */
                                        continue;
                                }
                        }
                        /* fall through, the trans huge pmd just split */
                }
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
                                dirty_accountable, prot_numa);
                pages += this_pages;
        } while (pmd++, addr = next, addr != end);

        if (mni_start)
                mmu_notifier_invalidate_range_end(mm, mni_start, end);

        if (nr_huge_updates)
                count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
        return pages;
}

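/*
 * The PUD and P4D levels below are plain walkers: skip empty entries and
 * hand each populated range down to the next level, accumulating the
 * number of PTEs that were actually changed.
 */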
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
                p4d_t *p4d, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pud_t *pud;
        unsigned long next;
        unsigned long pages = 0;

        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                pages += change_pmd_range(vma, pud, addr, next, newprot,
                                dirty_accountable, prot_numa);
        } while (pud++, addr = next, addr != end);

        return pages;
}

static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
                pgd_t *pgd, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        p4d_t *p4d;
        unsigned long next;
        unsigned long pages = 0;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                pages += change_pud_range(vma, p4d, addr, next, newprot,
                                dirty_accountable, prot_numa);
        } while (p4d++, addr = next, addr != end);

        return pages;
}

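/*
 * Top of the page-table walk for one range: flush the caches up front, mark
 * a TLB flush as pending so concurrent code sees it, and only issue the TLB
 * flush at the end if some entries were actually modified.
 */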
static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
        unsigned long pages = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        set_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pages += change_p4d_range(vma, pgd, addr, next, newprot,
                                dirty_accountable, prot_numa);
        } while (pgd++, addr = next, addr != end);

        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
        clear_tlb_flush_pending(mm);

        return pages;
}

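/*
 * hugetlb VMAs have their own page-table layout, so they are handled by
 * hugetlb_change_protection() instead of the generic walk above.
 */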
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        unsigned long pages;

        if (is_vm_hugetlb_page(vma))
                pages = hugetlb_change_protection(vma, start, end, newprot);
        else
                pages = change_protection_range(vma, start, end, newprot,
                                dirty_accountable, prot_numa);

        return pages;
}

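/*
 * Apply @newflags to the range [start, end) of @vma: account the memory if
 * the mapping becomes writable, merge or split VMAs so the range is covered
 * exactly, then rewrite the page protections.
 */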
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again. hugetlb mappings were accounted for
         * even if read-only, so there is no need to account for them here.
         */
        if (newflags & VM_WRITE) {
                /* Check space limits when area turns into data. */
                if (!may_expand_vm(mm, newflags, nrpages) &&
                                may_expand_vm(mm, oldflags, nrpages))
                        return -ENOMEM;
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory_mm(mm, charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                           vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
                           vma->vm_userfaultfd_ctx);
        if (*pprev) {
                vma = *pprev;
                VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
        dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
        vma_set_page_prot(vma);

        change_protection(vma, start, end, vma->vm_page_prot,
                          dirty_accountable, 0);

        /*
         * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
         * fault on access.
         */
        if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
                        (newflags & VM_WRITE)) {
                populate_vma_page_range(vma, start, end, NULL);
        }

        vm_stat_account(mm, oldflags, -nrpages);
        vm_stat_account(mm, newflags, nrpages);
        perf_event_mmap(vma);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}

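/*
 * Common entry point for mprotect() and pkey_mprotect(): validate the
 * arguments, then walk the VMAs covering [start, start + len) and apply
 * mprotect_fixup() to each one, splitting VMAs that are only partially
 * covered.
 */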
/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
                unsigned long prot, int pkey)
{
        unsigned long nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
        const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
                                (prot & PROT_READ);

        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (!arch_validate_prot(prot))
                return -EINVAL;

        reqprot = prot;

        if (down_write_killable(&current->mm->mmap_sem))
                return -EINTR;

        /*
         * If userspace did not allocate the pkey, do not let
         * them use it here.
         */
        error = -EINVAL;
        if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
                goto out;

        vma = find_vma(current->mm, start);
        error = -ENOMEM;
        if (!vma)
                goto out;
        prev = vma->vm_prev;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        } else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long mask_off_old_flags;
                unsigned long newflags;
                int new_vma_pkey;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                /* Does the application expect PROT_READ to imply PROT_EXEC */
                if (rier && (vma->vm_flags & VM_MAYEXEC))
                        prot |= PROT_EXEC;

                /*
                 * Each mprotect() call explicitly passes r/w/x permissions.
                 * If a permission is not passed to mprotect(), it must be
                 * cleared from the VMA.
                 */
                mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
                                        ARCH_VM_PKEY_FLAGS;

                new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
                newflags = calc_vm_prot_bits(prot, new_vma_pkey);
                newflags |= (vma->vm_flags & ~mask_off_old_flags);

462 if ((newflags
& ~(newflags
>> 4)) & (VM_READ
| VM_WRITE
| VM_EXEC
)) {
467 error
= security_file_mprotect(vma
, reqprot
, prot
);
474 error
= mprotect_fixup(vma
, &prev
, nstart
, tmp
, newflags
);
479 if (nstart
< prev
->vm_end
)
480 nstart
= prev
->vm_end
;
                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
                prot = reqprot;
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                unsigned long, prot)
{
        return do_mprotect_pkey(start, len, prot, -1);
}

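/*
 * Illustrative userspace sketch of the syscall above, assuming a
 * page-aligned region obtained from mmap():
 *
 *        if (mprotect(buf, len, PROT_READ) != 0)
 *                perror("mprotect");
 *
 * The kernel entry point simply forwards to do_mprotect_pkey() with
 * pkey == -1, i.e. the legacy path with no explicit protection key.
 */
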
#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
                unsigned long, prot, int, pkey)
{
        return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
        int pkey;
        int ret;

        /* No flags supported yet. */
        if (flags)
                return -EINVAL;
        /* check for unsupported init values */
        if (init_val & ~PKEY_ACCESS_MASK)
                return -EINVAL;

        down_write(&current->mm->mmap_sem);
        pkey = mm_pkey_alloc(current->mm);

        ret = -ENOSPC;
        if (pkey == -1)
                goto out;

        ret = arch_set_user_pkey_access(current, pkey, init_val);
        if (ret) {
                mm_pkey_free(current->mm, pkey);
                goto out;
        }
        ret = pkey;
out:
        up_write(&current->mm->mmap_sem);
        return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
        int ret;

        down_write(&current->mm->mmap_sem);
        ret = mm_pkey_free(current->mm, pkey);
        up_write(&current->mm->mmap_sem);

        /*
         * We could provide warnings or errors if any VMA still
         * has the pkey set here.
         */
        return ret;
}

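/*
 * Illustrative userspace sketch of how the three pkey syscalls fit
 * together; updating the thread-local access rights (e.g. via WRPKRU on
 * x86) is architecture specific and omitted here:
 *
 *        int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *        pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
 *        ...
 *        pkey_free(pkey);
 */
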
#endif /* CONFIG_ARCH_HAS_PKEYS */