// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 * (C) Copyright 1995 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 */
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/pagewalk.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/secretmem.h>

#include "internal.h"
struct mlock_fbatch {
	local_lock_t lock;
	struct folio_batch fbatch;
};

static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
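
/*
 * mlock/munlock requests are gathered on these per-CPU folio batches,
 * with the low bits of each folio pointer recording the kind of request
 * (see LRU_FOLIO/NEW_FOLIO below), and are applied to the LRU in bulk
 * by mlock_folio_batch().  Batching amortizes the lruvec lock over up
 * to a folio batch's worth of folios.
 */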
bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
/*
 * Mlocked folios are marked with the PG_mlocked flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked folio [folio_test_mlocked(folio)] is unevictable.  As such, it
 * will be ostensibly placed on the LRU "unevictable" list (actually no such
 * list exists), rather than the [in]active lists. PG_unevictable is set to
 * indicate the unevictable state.
 */
static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
{
	/* There is nothing more we can do while it's off LRU */
	if (!folio_test_clear_lru(folio))
		return lruvec;

	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (unlikely(folio_evictable(folio))) {
		/*
		 * This is a little surprising, but quite possible: PG_mlocked
		 * must have got cleared already by another CPU.  Could this
		 * folio be unevictable?  I'm not sure, but move it now if so.
		 */
		if (folio_test_unevictable(folio)) {
			lruvec_del_folio(lruvec, folio);
			folio_clear_unevictable(folio);
			lruvec_add_folio(lruvec, folio);

			__count_vm_events(UNEVICTABLE_PGRESCUED,
					  folio_nr_pages(folio));
		}
		goto out;
	}

	if (folio_test_unevictable(folio)) {
		if (folio_test_mlocked(folio))
			folio->mlock_count++;
		goto out;
	}

	lruvec_del_folio(lruvec, folio);
	folio_clear_active(folio);
	folio_set_unevictable(folio);
	folio->mlock_count = !!folio_test_mlocked(folio);
	lruvec_add_folio(lruvec, folio);
	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
	folio_set_lru(folio);
	return lruvec;
}
static struct lruvec *__mlock_new_folio(struct folio *folio,
					struct lruvec *lruvec)
{
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	/* As above, this is a little surprising, but possible */
	if (unlikely(folio_evictable(folio)))
		goto out;

	folio_set_unevictable(folio);
	folio->mlock_count = !!folio_test_mlocked(folio);
	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
	lruvec_add_folio(lruvec, folio);
	folio_set_lru(folio);
	return lruvec;
}
static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
{
	int nr_pages = folio_nr_pages(folio);
	bool isolated = false;

	if (!folio_test_clear_lru(folio))
		goto munlock;

	isolated = true;
	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (folio_test_unevictable(folio)) {
		/* Then mlock_count is maintained, but might undercount */
		if (folio->mlock_count)
			folio->mlock_count--;
		if (folio->mlock_count)
			goto out;
	}
	/* else assume that was the last mlock: reclaim will fix it if not */

munlock:
	if (folio_test_clear_mlocked(folio)) {
		__zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
		if (isolated || !folio_test_unevictable(folio))
			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
		else
			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}

	/* folio_evictable() has to be checked *after* clearing Mlocked */
	if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_unevictable(folio);
		lruvec_add_folio(lruvec, folio);
		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	}
out:
	if (isolated)
		folio_set_lru(folio);
	return lruvec;
}
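
/*
 * A sketch of the mlock_count bookkeeping above (illustrative): if the
 * same folio is mlocked from two VM_LOCKED vmas, __mlock_folio() leaves
 * mlock_count at 2; the first munlock drops it to 1 and keeps the folio
 * unevictable, the second drops it to 0 and clears PG_mlocked.  If the
 * folio could not be isolated from the LRU, the count may undercount,
 * in which case reclaim re-mlocks the folio via its rmap walk.
 */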
/*
 * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
 */
#define LRU_FOLIO 0x1
#define NEW_FOLIO 0x2
static inline struct folio *mlock_lru(struct folio *folio)
{
	return (struct folio *)((unsigned long)folio + LRU_FOLIO);
}

static inline struct folio *mlock_new(struct folio *folio)
{
	return (struct folio *)((unsigned long)folio + NEW_FOLIO);
}
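
/*
 * This works because a struct folio is never allocated at an address
 * with the low two bits set, so those bits are free to carry the kind
 * of request, e.g. (illustrative values only):
 *
 *	folio              0x...f9c0	munlock (no flag)
 *	mlock_lru(folio)   0x...f9c1	mlock, folio already on LRU
 *	mlock_new(folio)   0x...f9c2	mlock, newly allocated folio
 *
 * mlock_folio_batch() masks the flag back out before using the pointer.
 */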
/*
 * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
 * make use of such folio pointer flags in future, but for now just keep it for
 * mlock.  We could use three separate folio batches instead, but one feels
 * better (munlocking a full folio batch does not need to drain mlocking folio
 * batches first).
 */
static void mlock_folio_batch(struct folio_batch *fbatch)
{
	struct lruvec *lruvec = NULL;
	unsigned long mlock;
	struct folio *folio;
	int i;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		folio = fbatch->folios[i];
		mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
		folio = (struct folio *)((unsigned long)folio - mlock);
		fbatch->folios[i] = folio;

		if (mlock & LRU_FOLIO)
			lruvec = __mlock_folio(folio, lruvec);
		else if (mlock & NEW_FOLIO)
			lruvec = __mlock_new_folio(folio, lruvec);
		else
			lruvec = __munlock_folio(folio, lruvec);
	}

	if (lruvec)
		unlock_page_lruvec_irq(lruvec);
	folios_put(fbatch);
}
void mlock_drain_local(void)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	if (folio_batch_count(fbatch))
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}
void mlock_drain_remote(int cpu)
{
	struct folio_batch *fbatch;

	WARN_ON_ONCE(cpu_online(cpu));
	fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
	if (folio_batch_count(fbatch))
		mlock_folio_batch(fbatch);
}
bool need_mlock_drain(int cpu)
{
	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
}
/**
 * mlock_folio - mlock a folio already on (or temporarily off) LRU
 * @folio: folio to be mlocked.
 */
void mlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);

	if (!folio_test_set_mlocked(folio)) {
		int nr_pages = folio_nr_pages(folio);

		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}
/**
 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
 * @folio: folio to be mlocked, either normal or a THP head.
 */
void mlock_new_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	int nr_pages = folio_nr_pages(folio);

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	folio_set_mlocked(folio);

	zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}
/**
 * munlock_folio - munlock a folio
 * @folio: folio to be munlocked, either normal or a THP head.
 */
void munlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	/*
	 * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
	 * which will check whether the folio is multiply mlocked.
	 */
	folio_get(folio);
	if (!folio_batch_add(fbatch, folio) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}
static inline unsigned int folio_mlock_step(struct folio *folio,
		pte_t *pte, unsigned long addr, unsigned long end)
{
	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
	unsigned int count = (end - addr) >> PAGE_SHIFT;
	pte_t ptent = ptep_get(pte);

	if (!folio_test_large(folio))
		return 1;

	return folio_pte_batch(folio, addr, pte, ptent, count, fpb_flags, NULL,
			       NULL, NULL);
}
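
/*
 * For example (illustrative numbers): a 16-page large folio fully
 * mapped by 16 consecutive ptes starting at addr, with at least 16
 * pages left before end, yields a step of 16, so the caller advances
 * over all of its ptes at once; if fewer pages remain before end,
 * count caps the step at the remainder of the range.
 */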
static inline bool allow_mlock_munlock(struct folio *folio,
		struct vm_area_struct *vma, unsigned long start,
		unsigned long end, unsigned int step)
{
	/*
	 * For unlock, allow munlock large folio which is partially
	 * mapped to VMA. As it's possible that large folio is
	 * mlocked and VMA is split later.
	 *
	 * During memory pressure, such kind of large folio can
	 * be split. And the pages are not in VM_LOCKed VMA
	 * can be reclaimed.
	 */
	if (!(vma->vm_flags & VM_LOCKED))
		return true;

	/* folio_within_range() cannot take KSM, but any small folio is OK */
	if (!folio_test_large(folio))
		return true;

	/* folio not in range [start, end), skip mlock */
	if (!folio_within_range(folio, vma, start, end))
		return false;

	/* folio is not fully mapped, skip mlock */
	if (step != folio_nr_pages(folio))
		return false;

	return true;
}
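
/*
 * Scenario (illustrative): mlock() a range backed by a large folio,
 * then split the vma with mprotect() and munlock() one half.  The
 * folio is now only partially covered by a VM_LOCKED vma, but the
 * munlock must still be allowed, so that memory pressure can split
 * the folio and reclaim the pages left outside any VM_LOCKED vma.
 */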
static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte;
	pte_t ptent;
	struct folio *folio;
	unsigned int step = 1;
	unsigned long start = addr;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (!pmd_present(*pmd))
			goto out;
		if (is_huge_zero_pmd(*pmd))
			goto out;
		folio = pmd_folio(*pmd);
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);
		goto out;
	}

	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}

	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);
		if (!pte_present(ptent))
			continue;
		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		step = folio_mlock_step(folio, pte, addr, end);
		if (!allow_mlock_munlock(folio, vma, start, end, step))
			goto next_entry;

		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);

next_entry:
		addr += (step - 1) << PAGE_SHIFT;
	}
	pte_unmap(start_pte);
out:
	spin_unlock(ptl);
	cond_resched();
	return 0;
}
/*
 * mlock_vma_pages_range() - mlock any pages already in the range,
 *                           or munlock all pages in the range.
 * @vma - vma containing range to be mlock()ed or munlock()ed
 * @start - start address in @vma of the range
 * @end - end of range in @vma
 * @newflags - the new set of flags for @vma.
 *
 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
 */
static void mlock_vma_pages_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	static const struct mm_walk_ops mlock_walk_ops = {
		.pmd_entry = mlock_pte_range,
		.walk_lock = PGWALK_WRLOCK_VERIFY,
	};

	/*
	 * There is a slight chance that concurrent page migration,
	 * or page reclaim finding a page of this now-VM_LOCKED vma,
	 * will call mlock_vma_folio() and raise page's mlock_count:
	 * double counting, leaving the page unevictable indefinitely.
	 * Communicate this danger to mlock_vma_folio() with VM_IO,
	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
	 * mmap_lock is held in write mode here, so this weird
	 * combination should not be visible to other mmap_lock users;
	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
	 */
	if (newflags & VM_LOCKED)
		newflags |= VM_IO;
	vma_start_write(vma);
	vm_flags_reset_once(vma, newflags);

	lru_add_drain();
	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
	lru_add_drain();

	if (newflags & VM_IO) {
		newflags &= ~VM_IO;
		vm_flags_reset_once(vma, newflags);
	}
}
/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the buffer for mlocking.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       struct vm_area_struct **prev, unsigned long start,
	       unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	int nr_pages;
	int ret = 0;
	vm_flags_t oldflags = vma->vm_flags;

	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma) || vma_is_secretmem(vma) || (oldflags & VM_DROPPABLE))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!(newflags & VM_LOCKED))
		nr_pages = -nr_pages;
	else if (oldflags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */
	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
		/* No work to do, and mlocking twice would be wrong */
		vma_start_write(vma);
		vm_flags_reset(vma, newflags);
	} else {
		mlock_vma_pages_range(vma, start, end, newflags);
	}
out:
	*prev = vma;
	return ret;
}
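
/*
 * The locked_vm arithmetic above, by case (illustrative): locking a
 * fresh 4-page range adds 4 to mm->locked_vm; locking a range that was
 * already VM_LOCKED adds 0; unlocking subtracts 4 whether or not the
 * pages were ever faulted in, since locked_vm tracks address space,
 * not resident pages.
 */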
static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	VMA_ITERATOR(vmi, current->mm, start);

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = vma_iter_load(&vmi);
	if (!vma)
		return -ENOMEM;

	prev = vma_prev(&vmi);
	if (start > vma->vm_start)
		prev = vma;

	nstart = start;
	tmp = vma->vm_start;
	for_each_vma_range(vmi, vma, end) {
		int error;
		vm_flags_t newflags;

		if (vma->vm_start != tmp)
			return -ENOMEM;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
		newflags |= flags;
		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
		if (error)
			return error;
		tmp = vma_iter_end(&vmi);
		nstart = tmp;
	}

	if (tmp < end)
		return -ENOMEM;

	return 0;
}
/*
 * Go through vma areas and sum size of mlocked
 * vma pages, as return value.
 * Note deferred memory locking case(mlock2(,,MLOCK_ONFAULT)
 * is also counted.
 * Return value: previously mlocked page counts
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;
	unsigned long end;
	VMA_ITERATOR(vmi, mm, start);

	/* Don't overflow past ULONG_MAX */
	if (unlikely(ULONG_MAX - len < start))
		end = ULONG_MAX;
	else
		end = start + len;

	for_each_vma_range(vmi, vma, end) {
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (end < vma->vm_end) {
				count += end - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
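
/*
 * Worked example (illustrative addresses): with one VM_LOCKED vma
 * covering [0x1000, 0x9000) and a request of start = 0x3000,
 * len = 0x4000 (end = 0x7000): count -= 0x2000 for the part of the
 * vma below start, then count += 0x7000 - 0x1000 = 0x6000 since end
 * falls inside the vma; net 0x4000, i.e. 4 pages already mlocked.
 */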
/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the regions requested intersect with
		 * previously mlocked areas, that part area in "mm->locked_vm"
		 * should not be counted to new mlock increment count. So check
		 * and adjust locked count if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}
SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
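
/*
 * Userspace usage sketch (illustrative, not kernel code):
 *
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mlock(buf, len);		  // fault in and lock every page now
 *	mlock2(buf, len, MLOCK_ONFAULT);  // lock pages only as they fault in
 *
 * Either form charges the range against RLIMIT_MEMLOCK unless the
 * caller has CAP_IPC_LOCK.
 */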
SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}
/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	VMA_ITERATOR(vmi, current->mm, 0);
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= ~VM_LOCKED_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for_each_vma(vmi, vma) {
		int error;
		vm_flags_t newflags;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
		newflags |= to_add;

		error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
				    newflags);
		/* Ignore errors, but prev needs fixing up. */
		if (error)
			prev = vma;
		cond_resched();
	}
out:
	return 0;
}
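
/*
 * For example (illustrative): after
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);
 *	mlockall(MCL_CURRENT | MCL_ONFAULT);
 *
 * the second call has rewritten mm->def_flags without VM_LOCKED, so
 * MCL_FUTURE no longer applies: new mappings are not locked, while
 * existing mappings are now VM_LOCKED | VM_LOCKONFAULT.
 */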
SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}
SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct ucounts *ucounts)
{
	unsigned long lock_limit, locked;
	long memlock;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit != RLIM_INFINITY)
		lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);

	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		goto out;
	}
	if (!get_ucounts(ucounts)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		allowed = 0;
		goto out;
	}
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
	spin_lock(&shmlock_user_lock);
	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	put_ucounts(ucounts);
}