/*
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
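
/*
 * Architectures may provide their own pgprot_modify(); this fallback simply
 * adopts the new protection bits wholesale.
 */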
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif
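
/*
 * Walk the PTEs under one pmd and apply the new protection.  When prot_numa
 * is set, suitable entries are instead marked pte_numa() so that a later
 * access raises a NUMA hinting fault; *ret_all_same_node reports whether
 * every examined page sat on the same NUMA node.  Returns how many entries
 * were updated.
 */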
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa, bool *ret_all_same_node)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	bool all_same_node = true;
	int last_nid = -1;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			if (!prot_numa) {
				ptent = pte_modify(ptent, newprot);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page) {
					int this_nid = page_to_nid(page);
					if (last_nid == -1)
						last_nid = this_nid;
					if (last_nid != this_nid)
						all_same_node = false;

					/* only check non-shared pages */
					if (!pte_numa(oldpte) &&
					    page_mapcount(page) == 1) {
						ptent = pte_mknuma(ptent);
						updated = true;
					}
				}
			}

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;
			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
			pages++;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	*ret_all_same_node = all_same_node;

	return pages;
}
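
/*
 * Mark the PMD entry itself prot_numa under the page table lock so that a
 * whole PMD's worth of pages can later be handled as a single NUMA hinting
 * fault.  The !CONFIG_NUMA_BALANCING stub below must never be called.
 */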
#ifdef CONFIG_NUMA_BALANCING
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	spin_lock(&mm->page_table_lock);
	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
	spin_unlock(&mm->page_table_lock);
}
#else
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	BUG();
}
#endif /* CONFIG_NUMA_BALANCING */
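
/*
 * Walk the PMDs covering [addr, end): split transparent huge pages that do
 * not span a full PMD, update those that do in one go, and hand everything
 * else down to change_pte_range().
 */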
static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	bool all_same_node;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) {
				pages += HPAGE_PMD_NR;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		pages += change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa, &all_same_node);

		/*
		 * If we are changing protections for NUMA hinting faults then
		 * set pmd_numa if the examined pages were all on the same
		 * node. This allows a regular PMD to be handled as one fault
		 * and effectively batches the taking of the PTL
		 */
		if (prot_numa && all_same_node)
			change_pmd_protnuma(vma->vm_mm, addr, pmd);
	} while (pmd++, addr = next, addr != end);

	return pages;
}
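
/*
 * Walk the PUDs covering [addr, end) and recurse into change_pmd_range()
 * for every entry that is present.
 */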
static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}
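
/*
 * Top level of the page-table walk for one range of a VMA: iterate over
 * the PGD entries, accumulate the number of updated pages and flush the
 * TLB once at the end if anything actually changed.
 */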
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);

	return pages;
}
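
/*
 * Change the protection of the [start, end) range of @vma to @newprot.
 * Hugetlb mappings are handed to the hugetlb helper; everything else goes
 * through the regular page-table walk.  A non-zero prot_numa asks for NUMA
 * hinting updates rather than a straight protection change.  Returns the
 * number of pages that were changed.
 */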
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}
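
/*
 * Apply @newflags to the [start, end) slice of @vma: account for a mapping
 * that becomes writable, merge with neighbours or split the VMA so the
 * slice has its own VMA, then rewrite the page protections.  Expects
 * mmap_sem to be held for writing.
 */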
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mapping were accounted for
	 * even if read-only so there is no need to account for them here
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return -ENOMEM;
}
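
/*
 * mprotect(2): validate the arguments, then walk every VMA overlapping
 * [start, start + len) and apply the requested protection through
 * mprotect_fixup().  An illustrative userspace call would be
 * mprotect(addr, len, PROT_READ) to make a region read-only.
 */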
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}