/*
 * (C) Copyright 1995 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"

int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);
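
/*
 * Example (sketch, not part of this file): the userspace view of the
 * policy in can_do_mlock().  mlock(2) is permitted when the caller has
 * CAP_IPC_LOCK or a non-zero RLIMIT_MEMLOCK; buf and len below are
 * placeholders:
 *
 *	#include <sys/mman.h>
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl;
 *	getrlimit(RLIMIT_MEMLOCK, &rl);    -- inspect the lock limit
 *	if (mlock(buf, len) != 0)          -- EPERM when the policy
 *		perror("mlock");           -- above denies locking
 */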

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race.  The page already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * did try_to_munlock() succeed or punt?
			 */
			if (ret != SWAP_MLOCK)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * Some other task has removed the page from the LRU.
			 * putback_lru_page() will take care of removing the
			 * page from the unevictable list, if necessary.
			 * vmscan [page_referenced()] will move the page back
			 * to the unevictable list if some other vma has it
			 * mlocked.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}

static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_stack_continue(vma->vm_prev, addr);
}

/**
 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	struct page *pages[16]; /* 16 gives a reasonable batch */
	int nr_pages = (end - start) / PAGE_SIZE;
	int ret = 0;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_GET;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/* We don't try to access the guard page of a stack vma */
	if (stack_guard_page(vma, start)) {
		addr += PAGE_SIZE;
		nr_pages--;
	}

	while (nr_pages > 0) {
		int i;

		cond_resched();

		/*
		 * get_user_pages makes pages present if we are
		 * setting mlock. and this extra reference count will
		 * disable migration of this page.  However, page may
		 * still be truncated out from under us.
		 */
		ret = __get_user_pages(current, mm, addr,
				min_t(int, nr_pages, ARRAY_SIZE(pages)),
				gup_flags, pages, NULL);
		/*
		 * This can happen for, e.g., VM_NONLINEAR regions before
		 * a page has been allocated and mapped at a given offset,
		 * or for addresses that map beyond end of a file.
		 * We'll mlock the pages if/when they get faulted in.
		 */
		if (ret < 0)
			break;

		lru_add_drain();	/* push cached pages to LRU */

		for (i = 0; i < ret; i++) {
			struct page *page = pages[i];

			if (page->mapping) {
				/*
				 * That preliminary check is mainly to avoid
				 * the pointless overhead of lock_page on the
				 * ZERO_PAGE: which might bounce very badly if
				 * there is contention.  However, we're still
				 * dirtying its cacheline with get/put_page:
				 * we'll add another __get_user_pages flag to
				 * avoid it if that case turns out to matter.
				 */
				lock_page(page);
				/*
				 * Because we lock page here and migration is
				 * blocked by the elevated reference, we need
				 * only check for file-cache page truncation.
				 */
				if (page->mapping)
					mlock_vma_page(page);
				unlock_page(page);
			}
			put_page(page);	/* ref from get_user_pages() */
		}

		addr += ret * PAGE_SIZE;
		nr_pages -= ret;
		ret = 0;
	}

	return ret;	/* 0 or negative error code */
}
248 * convert get_user_pages() return value to posix mlock() error
250 static int __mlock_posix_error_return(long retval
)
252 if (retval
== -EFAULT
)
254 else if (retval
== -ENOMEM
)
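
/*
 * Sketch of the userspace-visible effect (assuming standard POSIX
 * semantics; unmapped_addr is a hypothetical address with no mapping):
 * a hole in the range surfaces as ENOMEM rather than EFAULT, and a
 * failed allocation as EAGAIN:
 *
 *	if (mlock(unmapped_addr, page_size) != 0)
 *		assert(errno == ENOMEM);   -- not EFAULT, per POSIX
 */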

/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma - the vma containing the specified address range
 * @start - starting address in @vma to mlock
 * @end - end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current))) {

		__mlock_vma_pages_range(vma, start, end);

		/* Hide errors from mmap() and other callers */
		return 0;
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			/*
			 * Like in __mlock_vma_pages_range(),
			 * because we lock page here and migration is
			 * blocked by the elevated reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags ||
			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;	/* don't set VM_LOCKED, don't count */

	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current)) {
		if (lock)
			make_pages_present(start, end);
		goto out;	/* don't set VM_LOCKED, don't count */
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */
	if (lock) {
		vma->vm_flags = newflags;
		ret = __mlock_vma_pages_range(vma, start, end);
		if (ret < 0)
			ret = __mlock_posix_error_return(ret);
	} else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}
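
/*
 * Usage note (sketch): the syscall page-aligns the request itself, so a
 * caller may pass an unaligned address and the whole containing page
 * gets locked:
 *
 *	char *p = malloc(3);    -- p need not be page-aligned
 *	mlock(p, 3);            -- locks the full page(s) covering p..p+2
 *	munlock(p, 3);
 */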

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

static int do_mlockall(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
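
/*
 * Example (sketch, not part of this file): typical real-time usage of
 * the two syscalls above:
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *		perror("mlockall");    -- lock all current and future maps
 *	...
 *	munlockall();                  -- undo, and clear MCL_FUTURE
 */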

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
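
/*
 * Example (sketch): the System V path that ends up in user_shm_lock(),
 * charging the locked segment to the owning user_struct rather than to
 * a process:
 *
 *	int id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);    -- accounted in user->locked_shm
 *	shmctl(id, SHM_UNLOCK, NULL);  -- undone via user_shm_unlock()
 */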