/*
 *      linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"
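
/*
 * can_do_mlock() - may the current task lock memory?  True if it has
 * CAP_IPC_LOCK or a non-zero RLIMIT_MEMLOCK limit.
 */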
int can_do_mlock(void)
{
        if (capable(CAP_IPC_LOCK))
                return 1;
        if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
                return 1;
        return 0;
}
EXPORT_SYMBOL(can_do_mlock);

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 *  LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
        VM_BUG_ON(!PageLocked(page));

        if (!page->mapping) {   /* truncated ? */
                return;
        }

        dec_zone_page_state(page, NR_MLOCK);
        count_vm_event(UNEVICTABLE_PGCLEARED);
        if (!isolate_lru_page(page)) {
                putback_lru_page(page);
        } else {
                /*
                 * We lost the race; the page has already moved to the
                 * evictable list.
                 */
                if (PageUnevictable(page))
                        count_vm_event(UNEVICTABLE_PGSTRANDED);
        }
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
        BUG_ON(!PageLocked(page));

        if (!TestSetPageMlocked(page)) {
                inc_zone_page_state(page, NR_MLOCK);
                count_vm_event(UNEVICTABLE_PGMLOCKED);
                if (!isolate_lru_page(page))
                        putback_lru_page(page);
        }
}

/*
 * called from munlock()/munmap() path with page supposedly on the LRU.
 *
 * Note:  unlike mlock_vma_page(), we can't just clear the PageMlocked
 * [in try_to_munlock()] and then attempt to isolate the page.  We must
 * isolate the page to keep others from messing with its unevictable
 * and mlocked state while trying to munlock.  However, we pre-clear the
 * mlocked state anyway as we might lose the isolation race and we might
 * not get another chance to clear PageMlocked.  If we successfully
 * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
 * mapping the page, it will restore the PageMlocked state, unless the page
 * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
 * perhaps redundantly.
 * If we lose the isolation race, and the page is mapped by other VM_LOCKED
 * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap(),
 * either of which will restore the PageMlocked state by calling
 * mlock_vma_page() above, if it can grab the vma's mmap sem.
 */
static void munlock_vma_page(struct page *page)
{
        BUG_ON(!PageLocked(page));

        if (TestClearPageMlocked(page)) {
                dec_zone_page_state(page, NR_MLOCK);
                if (!isolate_lru_page(page)) {
                        int ret = try_to_munlock(page);
                        /*
                         * did try_to_munlock() succeed or punt?
                         */
                        if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
                                count_vm_event(UNEVICTABLE_PGMUNLOCKED);

                        putback_lru_page(page);
                } else {
                        /*
                         * We lost the race.  Let try_to_unmap() deal
                         * with it.  At least we get the page state and
                         * mlock stats right.  However, page is still on
                         * the noreclaim list.  We'll fix that up when
                         * the page is eventually freed or we scan the
                         * noreclaim list.
                         */
                        if (PageUnevictable(page))
                                count_vm_event(UNEVICTABLE_PGSTRANDED);
                        else
                                count_vm_event(UNEVICTABLE_PGMUNLOCKED);
                }
        }
}

/**
 * __mlock_vma_pages_range() - mlock/munlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @mlock: 0 indicates munlock, otherwise mlock.
 *
 * If @mlock == 0, unlock an mlocked range;
 * else mlock the range of pages.  This takes care of making the pages present,
 * too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   int mlock)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr = start;
        struct page *pages[16]; /* 16 gives a reasonable batch */
        int nr_pages = (end - start) / PAGE_SIZE;
        int ret = 0;
        int gup_flags = 0;

        VM_BUG_ON(start & ~PAGE_MASK);
        VM_BUG_ON(end   & ~PAGE_MASK);
        VM_BUG_ON(start < vma->vm_start);
        VM_BUG_ON(end   > vma->vm_end);
        VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
                  (atomic_read(&mm->mm_users) != 0));

        /*
         * mlock:   don't populate pages if the vma has PROT_NONE permission.
         * munlock: always do munlock even if the vma has PROT_NONE
         *          permission or SIGKILL is pending.
         */
        if (!mlock)
                gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
                             GUP_FLAGS_IGNORE_SIGKILL;

        if (vma->vm_flags & VM_WRITE)
                gup_flags |= GUP_FLAGS_WRITE;

        while (nr_pages > 0) {
                int i;

                cond_resched();

                /*
                 * get_user_pages makes pages present if we are
                 * setting mlock, and this extra reference count will
                 * disable migration of this page.  However, page may
                 * still be truncated out from under us.
                 */
                ret = __get_user_pages(current, mm, addr,
                                min_t(int, nr_pages, ARRAY_SIZE(pages)),
                                gup_flags, pages, NULL);
                /*
                 * This can happen for, e.g., VM_NONLINEAR regions before
                 * a page has been allocated and mapped at a given offset,
                 * or for addresses that map beyond end of a file.
                 * We'll mlock the pages if/when they get faulted in.
                 */
                if (ret < 0)
                        break;
                if (ret == 0) {
                        /*
                         * We know the vma is there, so the only time
                         * we cannot get a single page should be an
                         * error (ret < 0) case.
                         */
                        WARN_ON(1);
                        break;
                }

                lru_add_drain();        /* push cached pages to LRU */

                for (i = 0; i < ret; i++) {
                        struct page *page = pages[i];

                        lock_page(page);
                        /*
                         * Because we lock page here and migration is blocked
                         * by the elevated reference, we need only check for
                         * page truncation (file-cache only).
                         */
                        if (page->mapping) {
                                if (mlock)
                                        mlock_vma_page(page);
                                else
                                        munlock_vma_page(page);
                        }
                        unlock_page(page);
                        put_page(page);         /* ref from get_user_pages() */

                        /*
                         * here we assume that get_user_pages() has given us
                         * a list of virtually contiguous pages.
                         */
                        addr += PAGE_SIZE;      /* for next get_user_pages() */
                        nr_pages--;
                }
                ret = 0;
        }

        return ret;     /* count entire vma as locked_vm */
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
        if (retval == -EFAULT)
                retval = -ENOMEM;
        else if (retval == -ENOMEM)
                retval = -EAGAIN;
        return retval;
}

#else /* CONFIG_UNEVICTABLE_LRU */

/*
 * Just make pages present if VM_LOCKED.  No-op if unlocking.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   int mlock)
{
        if (mlock && (vma->vm_flags & VM_LOCKED))
                return make_pages_present(start, end);
        return 0;
}

static inline int __mlock_posix_error_return(long retval)
{
        return 0;
}

#endif /* CONFIG_UNEVICTABLE_LRU */

/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma - the vma containing the specified address range
 * @start - starting address in @vma to mlock
 * @end   - end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 *
 * return negative error if vma spanning @start-@end disappears while
 * mmap semaphore is dropped.  Unlikely?
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int nr_pages = (end - start) / PAGE_SIZE;
        BUG_ON(!(vma->vm_flags & VM_LOCKED));

        /*
         * filter unlockable vmas
         */
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                goto no_mlock;

        if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current))) {
                long error;
                downgrade_write(&mm->mmap_sem);

                error = __mlock_vma_pages_range(vma, start, end, 1);

                up_read(&mm->mmap_sem);
                /* vma can change or disappear */
                down_write(&mm->mmap_sem);
                vma = find_vma(mm, start);
                /* non-NULL vma must contain @start, but need to check @end */
                if (!vma || end > vma->vm_end)
                        return -ENOMEM;

                return 0;       /* hide other errors from mmap(), et al */
        }

        /*
         * User mapped kernel pages or huge pages:
         * make these pages present to populate the ptes, but
         * fall thru' to reset VM_LOCKED--no need to unlock, and
         * return nr_pages so these don't get counted against task's
         * locked limit.  huge pages are already counted against
         * locked vm limit.
         */
        make_pages_present(start, end);

no_mlock:
        vma->vm_flags &= ~VM_LOCKED;    /* and don't come back! */
        return nr_pages;                /* error or pages NOT mlocked */
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end)
{
        vma->vm_flags &= ~VM_LOCKED;
        __mlock_vma_pages_range(vma, start, end, 0);
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        pgoff_t pgoff;
        int nr_pages;
        int ret = 0;
        int lock = newflags & VM_LOCKED;

        if (newflags == vma->vm_flags ||
                        (vma->vm_flags & (VM_IO | VM_PFNMAP)))
                goto out;       /* don't set VM_LOCKED, don't count */

        if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current)) {
                if (lock)
                        make_pages_present(start, end);
                goto out;       /* don't set VM_LOCKED, don't count */
        }

        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma));
        if (*prev) {
                vma = *prev;
                goto success;
        }

        if (start != vma->vm_start) {
                ret = split_vma(mm, vma, start, 1);
                if (ret)
                        goto out;
        }

        if (end != vma->vm_end) {
                ret = split_vma(mm, vma, end, 0);
                if (ret)
                        goto out;
        }

success:
        /*
         * Keep track of amount of locked VM.
         */
        nr_pages = (end - start) >> PAGE_SHIFT;
        if (!lock)
                nr_pages = -nr_pages;
        mm->locked_vm += nr_pages;

        /*
         * vm_flags is protected by the mmap_sem held in write mode.
         * It's okay if try_to_unmap_one unmaps a page just after we
         * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
         */
        vma->vm_flags = newflags;

        if (lock) {
                /*
                 * mmap_sem is currently held for write.  Downgrade the write
                 * lock to a read lock so that other faults, mmap scans, ...,
                 * can proceed while we fault in all pages.
                 */
                downgrade_write(&mm->mmap_sem);

                ret = __mlock_vma_pages_range(vma, start, end, 1);

                /*
                 * Need to reacquire mmap sem in write mode, as our callers
                 * expect this.  We have no support for atomically upgrading
                 * a sem to write, so we need to check for ranges while sem
                 * is unlocked.
                 */
                up_read(&mm->mmap_sem);
                /* vma can change or disappear */
                down_write(&mm->mmap_sem);
                *prev = find_vma(mm, start);
                /* non-NULL *prev must contain @start, but need to check @end */
                if (!(*prev) || end > (*prev)->vm_end)
                        ret = -ENOMEM;
                else if (ret > 0) {
                        mm->locked_vm -= ret;
                        ret = 0;
                } else
                        ret = __mlock_posix_error_return(ret); /* translate if needed */
        } else {
                /*
                 * TODO:  for unlocking, pages will already be resident, so
                 * we don't need to wait for allocations/reclaim/pagein, ...
                 * However, unlocking a very large region can still take a
                 * while.  Should we downgrade the semaphore for both lock
                 * AND unlock ?
                 */
                __mlock_vma_pages_range(vma, start, end, 0);
        }

out:
        *prev = vma;
        return ret;
}
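
/*
 * do_mlock() - walk the vmas covering [start, start+len) and apply or clear
 * VM_LOCKED on each via mlock_fixup().
 * Caller must hold current->mm->mmap_sem for write.
 */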
static int do_mlock(unsigned long start, size_t len, int on)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct * vma, * prev;
        int error;

        len = PAGE_ALIGN(len);
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;
        vma = find_vma_prev(current->mm, start, &prev);
        if (!vma || vma->vm_start > start)
                return -ENOMEM;

        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned int newflags;

                /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

                newflags = vma->vm_flags | VM_LOCKED;
                if (!on)
                        newflags &= ~VM_LOCKED;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        break;
                nstart = tmp;
                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        break;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        break;
                }
        }
        return error;
}
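
/*
 * mlock(2): fault in and lock the pages in [start, start+len), subject to
 * RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK.
 */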
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
        unsigned long locked;
        unsigned long lock_limit;
        int error = -ENOMEM;

        if (!can_do_mlock())
                return -EPERM;

        lru_add_drain_all();    /* flush pagevec */

        down_write(&current->mm->mmap_sem);
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;

        locked = len >> PAGE_SHIFT;
        locked += current->mm->locked_vm;

        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        /* check against resource limits */
        if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
                error = do_mlock(start, len, 1);
        up_write(&current->mm->mmap_sem);
        return error;
}
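
/*
 * munlock(2): undo mlock() on [start, start+len); no limit or capability
 * checks are needed to unlock.
 */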
SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
        int ret;

        down_write(&current->mm->mmap_sem);
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;
        ret = do_mlock(start, len, 0);
        up_write(&current->mm->mmap_sem);
        return ret;
}
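
/*
 * do_mlockall() - set or clear VM_LOCKED on every vma of the current mm
 * according to MCL_CURRENT, and set mm->def_flags for MCL_FUTURE.
 */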
static int do_mlockall(int flags)
{
        struct vm_area_struct * vma, * prev = NULL;
        unsigned int def_flags = 0;

        if (flags & MCL_FUTURE)
                def_flags = VM_LOCKED;
        current->mm->def_flags = def_flags;
        if (flags == MCL_FUTURE)
                goto out;

        for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
                unsigned int newflags;

                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
                        newflags &= ~VM_LOCKED;

                /* Ignore errors */
                mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
        }
out:
        return 0;
}
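
/*
 * mlockall(2): lock all current and/or future mappings of the calling
 * process, subject to RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK.
 */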
SYSCALL_DEFINE1(mlockall, int, flags)
{
        unsigned long lock_limit;
        int ret = -EINVAL;

        if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
                goto out;

        ret = -EPERM;
        if (!can_do_mlock())
                goto out;

        lru_add_drain_all();    /* flush pagevec */

        down_write(&current->mm->mmap_sem);

        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        ret = -ENOMEM;
        if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
            capable(CAP_IPC_LOCK))
                ret = do_mlockall(flags);
        up_write(&current->mm->mmap_sem);
out:
        return ret;
}
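
/*
 * munlockall(2): unlock all mappings and clear the MCL_FUTURE default.
 */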
SYSCALL_DEFINE0(munlockall)
{
        int ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mlockall(0);
        up_write(&current->mm->mmap_sem);
        return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);
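
/*
 * user_shm_lock() - account @size bytes of SHM_LOCKed memory to @user,
 * checking the caller's RLIMIT_MEMLOCK.  Returns 1 if the lock is allowed,
 * 0 if it would exceed the limit.
 */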
int user_shm_lock(size_t size, struct user_struct *user)
{
        unsigned long lock_limit, locked;
        int allowed = 0;

        locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        if (lock_limit == RLIM_INFINITY)
                allowed = 1;
        lock_limit >>= PAGE_SHIFT;
        spin_lock(&shmlock_user_lock);
        if (!allowed &&
            locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
                goto out;
        get_uid(user);
        user->locked_shm += locked;
        allowed = 1;
out:
        spin_unlock(&shmlock_user_lock);
        return allowed;
}
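
/*
 * user_shm_unlock() - release the pages accounted by user_shm_lock() and
 * drop the uid reference taken there.
 */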
void user_shm_unlock(size_t size, struct user_struct *user)
{
        spin_lock(&shmlock_user_lock);
        user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        spin_unlock(&shmlock_user_lock);
        free_uid(user);
}
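
/*
 * alloc_locked_buffer() - kzalloc() a buffer and account it as locked memory
 * in the current mm, after checking RLIMIT_AS and RLIMIT_MEMLOCK.
 * Returns NULL if either limit would be exceeded or the allocation fails.
 */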
void *alloc_locked_buffer(size_t size)
{
        unsigned long rlim, vm, pgsz;
        void *buffer = NULL;

        pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

        down_write(&current->mm->mmap_sem);

        rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
        vm   = current->mm->total_vm + pgsz;
        if (rlim < vm)
                goto out;

        rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
        vm   = current->mm->locked_vm + pgsz;
        if (rlim < vm)
                goto out;

        buffer = kzalloc(size, GFP_KERNEL);
        if (!buffer)
                goto out;

        current->mm->total_vm   += pgsz;
        current->mm->locked_vm  += pgsz;

out:
        up_write(&current->mm->mmap_sem);
        return buffer;
}
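
/*
 * free_locked_buffer() - undo alloc_locked_buffer(): subtract the accounted
 * pages from the current mm and kfree() the buffer.
 */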
void free_locked_buffer(void *buffer, size_t size)
{
        unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

        down_write(&current->mm->mmap_sem);

        current->mm->total_vm  -= pgsz;
        current->mm->locked_vm -= pgsz;

        up_write(&current->mm->mmap_sem);

        kfree(buffer);
}