// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>

#include "util.h"

struct shmid_kernel /* private to the kernel */
{
        struct kern_ipc_perm    shm_perm;
        struct file             *shm_file;
        unsigned long           shm_nattch;
        unsigned long           shm_segsz;
        time64_t                shm_atim;
        time64_t                shm_dtim;
        time64_t                shm_ctim;
        struct pid              *shm_cprid;
        struct pid              *shm_lprid;
        struct user_struct      *mlock_user;

        /* The task created the shm object.  NULL if the task is dead. */
        struct task_struct      *shm_creator;
        struct list_head        shm_clist;      /* list by creator */
} __randomize_layout;

/* shm_mode upper byte flags */
#define SHM_DEST        01000   /* segment will be destroyed on last detach */
#define SHM_LOCKED      02000   /* segment will not be swapped */

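/*
 * shm_file_data is the per-attach state hung off the file created in
 * do_shmat(): the IPC namespace and segment id, a reference to the
 * backing shmem/hugetlbfs file, and that file's vm_ops, which the
 * shm_vm_ops wrappers below forward to.
 */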
struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)     ((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)                 \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_rmid_forced = 0;
        ns->shm_tot = 0;
        ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
        rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif

static int __init ipc_ns_init(void)
{
        shm_init_ns(&init_ipc_ns);
        return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
        ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
                                " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#else
                                " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#endif
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp;

        rcu_read_lock();
        ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
        if (IS_ERR(ipcp))
                goto err;

        ipc_lock_object(ipcp);
        /*
         * ipc_rmid() may have already freed the ID while ipc_lock_object()
         * was spinning: here verify that the structure is still valid.
         * Upon races with RMID, return -EIDRM, thus indicating that
         * the ID points to a removed identifier.
         */
        if (ipc_valid_object(ipcp)) {
                /* return a locked ipc object upon success */
                return container_of(ipcp, struct shmid_kernel, shm_perm);
        }

        ipc_unlock_object(ipcp);
        ipcp = ERR_PTR(-EIDRM);
err:
        rcu_read_unlock();
        /*
         * Callers of shm_lock() must validate the status of the returned ipc
         * object pointer and error out as appropriate.
         */
        return ERR_CAST(ipcp);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
        rcu_read_lock();
        ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
        struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
                                                        rcu);
        struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
                                                        shm_perm);
        security_shm_free(&shp->shm_perm);
        kvfree(shp);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        list_del(&s->shm_clist);
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

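/*
 * __shm_open does the attach-side accounting: look up and lock the
 * segment, reject the attach if the id has been reused by a different
 * backing file, then stamp shm_atim/shm_lprid and bump shm_nattch.
 */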
static int __shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);

        if (IS_ERR(shp))
                return PTR_ERR(shp);

        if (shp->shm_file != sfd->file) {
                /* ID was reused */
                shm_unlock(shp);
                return -EINVAL;
        }

        shp->shm_atim = ktime_get_real_seconds();
        ipc_update_pid(&shp->shm_lprid, task_tgid(current));
        shp->shm_nattch++;
        shm_unlock(shp);
        return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        int err = __shm_open(vma);
        /*
         * We raced in the idr lookup or with shm_destroy().
         * Either way, the ID is busted.
         */
        WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        struct file *shm_file;

        shm_file = shp->shm_file;
        shp->shm_file = NULL;
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shm_file))
                shmem_lock(shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
                user_shm_unlock(i_size_read(file_inode(shm_file)),
                                shp->mlock_user);
        fput(shm_file);
        ipc_update_pid(&shp->shm_cprid, NULL);
        ipc_update_pid(&shp->shm_lprid, NULL);
        ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        return (shp->shm_nattch == 0) &&
               (ns->shm_rmid_forced ||
                (shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rwsem);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);

        /*
         * We raced in the idr lookup or with shm_destroy().
         * Either way, the ID is busted.
         */
        if (WARN_ON_ONCE(IS_ERR(shp)))
                goto done; /* no-op */

        ipc_update_pid(&shp->shm_lprid, task_tgid(current));
        shp->shm_dtim = ktime_get_real_seconds();
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
done:
        up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        /*
         * We want to destroy segments without users and with already
         * exit'ed originating process.
         *
         * As shp->* are changed under rwsem, it's safe to skip shp locking.
         */
        if (shp->shm_creator != NULL)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
        down_write(&shm_ids(ns).rwsem);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
        up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
        struct ipc_namespace *ns = task->nsproxy->ipc_ns;
        struct shmid_kernel *shp, *n;

        if (list_empty(&task->sysvshm.shm_clist))
                return;

        /*
         * If kernel.shm_rmid_forced is not set then only keep track of
         * which shmids are orphaned, so that a later set of the sysctl
         * can clean them up.
         */
        if (!ns->shm_rmid_forced) {
                down_read(&shm_ids(ns).rwsem);
                list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
                        shp->shm_creator = NULL;
                /*
                 * Only under read lock but we are only called on current
                 * so no entry on the list will be shared.
                 */
                list_del(&task->sysvshm.shm_clist);
                up_read(&shm_ids(ns).rwsem);
                return;
        }

        /*
         * Destroy all already created segments, that were not yet mapped,
         * and mark any mapped as orphan to cover the sysctl toggling.
         * Destroy is skipped if shm_may_destroy() returns false.
         */
        down_write(&shm_ids(ns).rwsem);
        list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
                shp->shm_creator = NULL;

                if (shm_may_destroy(ns, shp)) {
                        shm_lock_by_ptr(shp);
                        shm_destroy(ns, shp);
                }
        }

        /* Remove the list head from any segments still attached. */
        list_del(&task->sysvshm.shm_clist);
        up_write(&shm_ids(ns).rwsem);
}

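/*
 * The helpers below implement shm_vm_ops by delegating to the vm_ops of
 * the backing shmem or hugetlbfs file, captured in sfd->vm_ops by
 * shm_mmap() after it calls the real ->mmap().
 */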
static vm_fault_t shm_fault(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vmf);
}

static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        if (sfd->vm_ops->may_split)
                return sfd->vm_ops->may_split(vma, addr);

        return 0;
}

static unsigned long shm_pagesize(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        if (sfd->vm_ops->pagesize)
                return sfd->vm_ops->pagesize(vma);

        return PAGE_SIZE;
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        /*
         * In case of remap_file_pages() emulation, the file can represent an
         * IPC ID that was removed, and possibly even reused by another shm
         * segment already. Propagate this case as an error to caller.
         */
        ret = __shm_open(vma);
        if (ret)
                return ret;

        ret = call_mmap(sfd->file, vma);
        if (ret) {
                shm_close(vma);
                return ret;
        }
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        WARN_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        return 0;
}

static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        fput(sfd->file);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fsync)
                return -EINVAL;
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
                          loff_t len)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fallocate)
                return -EOPNOTSUPP;
        return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
                                                  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area      = shm_get_unmapped_area,
        .llseek         = noop_llseek,
        .fallocate      = shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area      = shm_get_unmapped_area,
        .llseek         = noop_llseek,
        .fallocate      = shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
        return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
        .open   = shm_open,     /* callback for a new vm-area open */
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
        .may_split = shm_may_split,
        .pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        vm_flags_t acctflag = 0;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (numpages << PAGE_SHIFT < size)
                return -ENOSPC;

        if (ns->shm_tot + numpages < ns->shm_tot ||
                        ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
        if (unlikely(!shp))
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(&shp->shm_perm);
        if (error) {
                kvfree(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                struct hstate *hs;
                size_t hugesize;

                hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
                if (!hs) {
                        error = -EINVAL;
                        goto no_file;
                }
                hugesize = ALIGN(size, huge_page_size(hs));

                /* hugetlb_file_setup applies strict accounting */
                if (shmflg & SHM_NORESERVE)
                        acctflag = VM_NORESERVE;
                file = hugetlb_file_setup(name, hugesize, acctflag,
                                  &shp->mlock_user, HUGETLB_SHMFS_INODE,
                                (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
        } else {
                /*
                 * Do not allow no accounting for OVERCOMMIT_NEVER, even
                 * if it's asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
                file = shmem_kernel_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        shp->shm_cprid = get_pid(task_tgid(current));
        shp->shm_lprid = NULL;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = ktime_get_real_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;

        /* ipc_addid() locks shp upon success. */
        error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (error < 0)
                goto no_id;

        list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file_inode(file)->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;

        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
        return error;

no_id:
        ipc_update_pid(&shp->shm_cprid, NULL);
        ipc_update_pid(&shp->shm_lprid, NULL);
        if (is_file_hugepages(file) && shp->mlock_user)
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
        ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
        return error;
no_file:
        call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
        return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}

long ksys_shmget(key_t key, size_t size, int shmflg)
{
        struct ipc_namespace *ns;
        static const struct ipc_ops shm_ops = {
                .getnew = newseg,
                .associate = security_shm_associate,
                .more_checks = shm_more_checks,
        };
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
        return ksys_shmget(key, size, shmflg);
}

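/*
 * For reference, the typical userspace lifecycle of a segment created
 * here (illustrative sketch, error handling elided):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	/- attach; bumps shm_nattch
 *	p[0] = 'x';			/- segment is ordinary memory
 *	shmdt(p);			/- detach; drops shm_nattch
 *	shmctl(id, IPC_RMID, NULL);	/- destroy on last detach
 */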
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shmid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz   = in->shm_segsz;
                out.shm_atime   = in->shm_atime;
                out.shm_dtime   = in->shm_dtime;
                out.shm_ctime   = in->shm_ctime;
                out.shm_cpid    = in->shm_cpid;
                out.shm_lpid    = in->shm_lpid;
                out.shm_nattch  = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

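/*
 * As above, "version" selects between the modern shmid64_ds layout
 * (IPC_64) and the legacy shmid_ds ABI; on the way in, only the
 * permission fields are taken from the legacy layout.
 */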
static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
        {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid       = tbuf_old.shm_perm.uid;
                out->shm_perm.gid       = tbuf_old.shm_perm.gid;
                out->shm_perm.mode      = tbuf_old.shm_perm.mode;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin      = in->shmmin;
                out.shmmni      = in->shmmni;
                out.shmseg      = in->shmseg;
                out.shmall      = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
                        unsigned long *rss_add, unsigned long *swp_add)
{
        struct inode *inode;

        inode = file_inode(shp->shm_file);

        if (is_file_hugepages(shp->shm_file)) {
                struct address_space *mapping = inode->i_mapping;
                struct hstate *h = hstate_file(shp->shm_file);
                *rss_add += pages_per_huge_page(h) * mapping->nrpages;
        } else {
#ifdef CONFIG_SHMEM
                struct shmem_inode_info *info = SHMEM_I(inode);

                spin_lock_irq(&info->lock);
                *rss_add += inode->i_mapping->nrpages;
                *swp_add += info->swapped;
                spin_unlock_irq(&info->lock);
#else
                *rss_add += inode->i_mapping->nrpages;
#endif
        }
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;

                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);

                shm_add_rss_swap(shp, rss, swp);

                total++;
        }
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid64_ds *shmid64)
{
        struct kern_ipc_perm *ipcp;
        struct shmid_kernel *shp;
        int err;

        down_write(&shm_ids(ns).rwsem);
        rcu_read_lock();

        ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
                                      &shmid64->shm_perm, 0);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_unlock1;
        }

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(&shp->shm_perm, cmd);
        if (err)
                goto out_unlock1;

        switch (cmd) {
        case IPC_RMID:
                ipc_lock_object(&shp->shm_perm);
                /* do_shm_rmid unlocks the ipc object and rcu */
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipc_lock_object(&shp->shm_perm);
                err = ipc_update_perm(&shmid64->shm_perm, ipcp);
                if (err)
                        goto out_unlock0;
                shp->shm_ctim = ktime_get_real_seconds();
                break;
        default:
                err = -EINVAL;
                goto out_unlock1;
        }

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
out_up:
        up_write(&shm_ids(ns).rwsem);
        return err;
}

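/*
 * The *_info helpers below fill in the limit/usage summaries for
 * IPC_INFO and SHM_INFO.  On success they return the highest in-use
 * index (ipc_get_maxidx()), which userspace can use as an upper bound
 * when iterating segments with SHM_STAT.
 */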
static int shmctl_ipc_info(struct ipc_namespace *ns,
                           struct shminfo64 *shminfo)
{
        int err = security_shm_shmctl(NULL, IPC_INFO);
        if (!err) {
                memset(shminfo, 0, sizeof(*shminfo));
                shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
                shminfo->shmmax = ns->shm_ctlmax;
                shminfo->shmall = ns->shm_ctlall;
                shminfo->shmmin = SHMMIN;
                down_read(&shm_ids(ns).rwsem);
                err = ipc_get_maxidx(&shm_ids(ns));
                up_read(&shm_ids(ns).rwsem);
                if (err < 0)
                        err = 0;
        }
        return err;
}

static int shmctl_shm_info(struct ipc_namespace *ns,
                           struct shm_info *shm_info)
{
        int err = security_shm_shmctl(NULL, SHM_INFO);
        if (!err) {
                memset(shm_info, 0, sizeof(*shm_info));
                down_read(&shm_ids(ns).rwsem);
                shm_info->used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
                shm_info->shm_tot = ns->shm_tot;
                shm_info->swap_attempts = 0;
                shm_info->swap_successes = 0;
                err = ipc_get_maxidx(&shm_ids(ns));
                up_read(&shm_ids(ns).rwsem);
                if (err < 0)
                        err = 0;
        }
        return err;
}

static int shmctl_stat(struct ipc_namespace *ns, int shmid,
                        int cmd, struct shmid64_ds *tbuf)
{
        struct shmid_kernel *shp;
        int err;

        memset(tbuf, 0, sizeof(*tbuf));

        rcu_read_lock();
        if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
                shp = shm_obtain_object(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out_unlock;
                }
        } else { /* IPC_STAT */
                shp = shm_obtain_object_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out_unlock;
                }
        }

        /*
         * Semantically SHM_STAT_ANY ought to be identical to
         * that functionality provided by the /proc/sysvipc/
         * interface. As such, only audit these calls and
         * do not do traditional S_IRUGO permission checks on
         * the ipc object.
         */
        if (cmd == SHM_STAT_ANY)
                audit_ipc_obj(&shp->shm_perm);
        else {
                err = -EACCES;
                if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
                        goto out_unlock;
        }

        err = security_shm_shmctl(&shp->shm_perm, cmd);
        if (err)
                goto out_unlock;

        ipc_lock_object(&shp->shm_perm);

        if (!ipc_valid_object(&shp->shm_perm)) {
                ipc_unlock_object(&shp->shm_perm);
                err = -EIDRM;
                goto out_unlock;
        }

        kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
        tbuf->shm_segsz = shp->shm_segsz;
        tbuf->shm_atime = shp->shm_atim;
        tbuf->shm_dtime = shp->shm_dtim;
        tbuf->shm_ctime = shp->shm_ctim;
#ifndef CONFIG_64BIT
        tbuf->shm_atime_high = shp->shm_atim >> 32;
        tbuf->shm_dtime_high = shp->shm_dtim >> 32;
        tbuf->shm_ctime_high = shp->shm_ctim >> 32;
#endif
        tbuf->shm_cpid  = pid_vnr(shp->shm_cprid);
        tbuf->shm_lpid  = pid_vnr(shp->shm_lprid);
        tbuf->shm_nattch = shp->shm_nattch;

        if (cmd == IPC_STAT) {
                /*
                 * As defined in SUS:
                 * Return 0 on success
                 */
                err = 0;
        } else {
                /*
                 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
                 * Return the full id, including the sequence number
                 */
                err = shp->shm_perm.id;
        }

        ipc_unlock_object(&shp->shm_perm);
out_unlock:
        rcu_read_unlock();
        return err;
}

static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
{
        struct shmid_kernel *shp;
        struct file *shm_file;
        int err;

        rcu_read_lock();
        shp = shm_obtain_object_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out_unlock1;
        }

        audit_ipc_obj(&(shp->shm_perm));
        err = security_shm_shmctl(&shp->shm_perm, cmd);
        if (err)
                goto out_unlock1;

        ipc_lock_object(&shp->shm_perm);

        /* check if shm_destroy() is tearing down shp */
        if (!ipc_valid_object(&shp->shm_perm)) {
                err = -EIDRM;
                goto out_unlock0;
        }

        if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
                kuid_t euid = current_euid();

                if (!uid_eq(euid, shp->shm_perm.uid) &&
                    !uid_eq(euid, shp->shm_perm.cuid)) {
                        err = -EPERM;
                        goto out_unlock0;
                }
                if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
                        err = -EPERM;
                        goto out_unlock0;
                }
        }

        shm_file = shp->shm_file;
        if (is_file_hugepages(shm_file))
                goto out_unlock0;

        if (cmd == SHM_LOCK) {
                struct user_struct *user = current_user();

                err = shmem_lock(shm_file, 1, user);
                if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                        shp->shm_perm.mode |= SHM_LOCKED;
                        shp->mlock_user = user;
                }
                goto out_unlock0;
        }

        /* SHM_UNLOCK */
        if (!(shp->shm_perm.mode & SHM_LOCKED))
                goto out_unlock0;
        shmem_lock(shm_file, 0, shp->mlock_user);
        shp->shm_perm.mode &= ~SHM_LOCKED;
        shp->mlock_user = NULL;
        get_file(shm_file);
        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
        shmem_unlock_mapping(shm_file->f_mapping);

        fput(shm_file);
        return err;

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
        return err;
}

static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
{
        int err;
        struct ipc_namespace *ns;
        struct shmid64_ds sem64;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;

        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO: {
                struct shminfo64 shminfo;
                err = shmctl_ipc_info(ns, &shminfo);
                if (err < 0)
                        return err;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        err = -EFAULT;
                return err;
        }
        case SHM_INFO: {
                struct shm_info shm_info;
                err = shmctl_shm_info(ns, &shm_info);
                if (err < 0)
                        return err;
                if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
                        err = -EFAULT;
                return err;
        }
        case SHM_STAT:
        case SHM_STAT_ANY:
        case IPC_STAT: {
                err = shmctl_stat(ns, shmid, cmd, &sem64);
                if (err < 0)
                        return err;
                if (copy_shmid_to_user(buf, &sem64, version))
                        err = -EFAULT;
                return err;
        }
        case IPC_SET:
                if (copy_shmid_from_user(&sem64, buf, version))
                        return -EFAULT;
                fallthrough;
        case IPC_RMID:
                return shmctl_down(ns, shmid, cmd, &sem64);
        case SHM_LOCK:
        case SHM_UNLOCK:
                return shmctl_do_lock(ns, shmid, cmd);
        default:
                return -EINVAL;
        }
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        return ksys_shmctl(shmid, cmd, buf, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
        int version = ipc_parse_version(&cmd);

        return ksys_shmctl(shmid, cmd, buf, version);
}

SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        return ksys_old_shmctl(shmid, cmd, buf);
}
#endif

#ifdef CONFIG_COMPAT

struct compat_shmid_ds {
        struct compat_ipc_perm shm_perm;
        int shm_segsz;
        old_time32_t shm_atime;
        old_time32_t shm_dtime;
        old_time32_t shm_ctime;
        compat_ipc_pid_t shm_cpid;
        compat_ipc_pid_t shm_lpid;
        unsigned short shm_nattch;
        unsigned short shm_unused;
        compat_uptr_t shm_unused2;
        compat_uptr_t shm_unused3;
};

struct compat_shminfo64 {
        compat_ulong_t shmmax;
        compat_ulong_t shmmin;
        compat_ulong_t shmmni;
        compat_ulong_t shmseg;
        compat_ulong_t shmall;
        compat_ulong_t __unused1;
        compat_ulong_t __unused2;
        compat_ulong_t __unused3;
        compat_ulong_t __unused4;
};

struct compat_shm_info {
        compat_int_t used_ids;
        compat_ulong_t shm_tot, shm_rss, shm_swp;
        compat_ulong_t swap_attempts, swap_successes;
};

static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
                                        int version)
{
        if (in->shmmax > INT_MAX)
                in->shmmax = INT_MAX;
        if (version == IPC_64) {
                struct compat_shminfo64 info;
                memset(&info, 0, sizeof(info));
                info.shmmax = in->shmmax;
                info.shmmin = in->shmmin;
                info.shmmni = in->shmmni;
                info.shmseg = in->shmseg;
                info.shmall = in->shmall;
                return copy_to_user(buf, &info, sizeof(info));
        } else {
                struct shminfo info;
                memset(&info, 0, sizeof(info));
                info.shmmax = in->shmmax;
                info.shmmin = in->shmmin;
                info.shmmni = in->shmmni;
                info.shmseg = in->shmseg;
                info.shmall = in->shmall;
                return copy_to_user(buf, &info, sizeof(info));
        }
}

static int put_compat_shm_info(struct shm_info *ip,
                                struct compat_shm_info __user *uip)
{
        struct compat_shm_info info;

        memset(&info, 0, sizeof(info));
        info.used_ids = ip->used_ids;
        info.shm_tot = ip->shm_tot;
        info.shm_rss = ip->shm_rss;
        info.shm_swp = ip->shm_swp;
        info.swap_attempts = ip->swap_attempts;
        info.swap_successes = ip->swap_successes;
        return copy_to_user(uip, &info, sizeof(info));
}

static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
                                        int version)
{
        if (version == IPC_64) {
                struct compat_shmid64_ds v;
                memset(&v, 0, sizeof(v));
                to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
                v.shm_atime      = lower_32_bits(in->shm_atime);
                v.shm_atime_high = upper_32_bits(in->shm_atime);
                v.shm_dtime      = lower_32_bits(in->shm_dtime);
                v.shm_dtime_high = upper_32_bits(in->shm_dtime);
                v.shm_ctime      = lower_32_bits(in->shm_ctime);
                v.shm_ctime_high = upper_32_bits(in->shm_ctime);
                v.shm_segsz = in->shm_segsz;
                v.shm_nattch = in->shm_nattch;
                v.shm_cpid = in->shm_cpid;
                v.shm_lpid = in->shm_lpid;
                return copy_to_user(buf, &v, sizeof(v));
        } else {
                struct compat_shmid_ds v;
                memset(&v, 0, sizeof(v));
                to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
                v.shm_perm.key = in->shm_perm.key;
                v.shm_atime = in->shm_atime;
                v.shm_dtime = in->shm_dtime;
                v.shm_ctime = in->shm_ctime;
                v.shm_segsz = in->shm_segsz;
                v.shm_nattch = in->shm_nattch;
                v.shm_cpid = in->shm_cpid;
                v.shm_lpid = in->shm_lpid;
                return copy_to_user(buf, &v, sizeof(v));
        }
}

static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
                                        int version)
{
        memset(out, 0, sizeof(*out));
        if (version == IPC_64) {
                struct compat_shmid64_ds __user *p = buf;
                return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
        } else {
                struct compat_shmid_ds __user *p = buf;
                return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
        }
}

static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
{
        struct ipc_namespace *ns;
        struct shmid64_ds sem64;
        int err;

        ns = current->nsproxy->ipc_ns;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;

        switch (cmd) {
        case IPC_INFO: {
                struct shminfo64 shminfo;
                err = shmctl_ipc_info(ns, &shminfo);
                if (err < 0)
                        return err;
                if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
                        err = -EFAULT;
                return err;
        }
        case SHM_INFO: {
                struct shm_info shm_info;
                err = shmctl_shm_info(ns, &shm_info);
                if (err < 0)
                        return err;
                if (put_compat_shm_info(&shm_info, uptr))
                        err = -EFAULT;
                return err;
        }
        case IPC_STAT:
        case SHM_STAT_ANY:
        case SHM_STAT: {
                err = shmctl_stat(ns, shmid, cmd, &sem64);
                if (err < 0)
                        return err;
                if (copy_compat_shmid_to_user(uptr, &sem64, version))
                        err = -EFAULT;
                return err;
        }
        case IPC_SET:
                if (copy_compat_shmid_from_user(&sem64, uptr, version))
                        return -EFAULT;
                fallthrough;
        case IPC_RMID:
                return shmctl_down(ns, shmid, cmd, &sem64);
        case SHM_LOCK:
        case SHM_UNLOCK:
                return shmctl_do_lock(ns, shmid, cmd);
        default:
                return -EINVAL;
        }
}

COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
{
        return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
{
        int version = compat_ipc_parse_version(&cmd);

        return compat_ksys_shmctl(shmid, cmd, uptr, version);
}

COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
{
        return compat_ksys_old_shmctl(shmid, cmd, uptr);
}
#endif
#endif

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
              ulong *raddr, unsigned long shmlba)
{
        struct shmid_kernel *shp;
        unsigned long addr = (unsigned long)shmaddr;
        unsigned long size;
        struct file *file, *base;
        int    err;
        unsigned long flags = MAP_SHARED;
        unsigned long prot;
        int acc_mode;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        int f_flags;
        unsigned long populate = 0;

        err = -EINVAL;
        if (shmid < 0)
                goto out;

        if (addr) {
                if (addr & (shmlba - 1)) {
                        if (shmflg & SHM_RND) {
                                addr &= ~(shmlba - 1);  /* round down */

                                /*
                                 * Ensure that the round-down is non-nil
                                 * when remapping. This can happen for
                                 * cases when addr < shmlba.
                                 */
                                if (!addr && (shmflg & SHM_REMAP))
                                        goto out;
                        } else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }

                flags |= MAP_FIXED;
        } else if ((shmflg & SHM_REMAP))
                goto out;

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_flags = O_RDONLY;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_flags = O_RDWR;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        rcu_read_lock();
        shp = shm_obtain_object_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out_unlock;
        }

        err = -EACCES;
        if (ipcperms(ns, &shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        ipc_lock_object(&shp->shm_perm);

        /* check if shm_destroy() is tearing down shp */
        if (!ipc_valid_object(&shp->shm_perm)) {
                ipc_unlock_object(&shp->shm_perm);
                err = -EIDRM;
                goto out_unlock;
        }

        /*
         * We need to take a reference to the real shm file to prevent the
         * pointer from becoming stale in cases where the lifetime of the outer
         * file extends beyond that of the shm segment. It's not usually
         * possible, but it can happen during remap_file_pages() emulation as
         * that unmaps the memory, then does ->mmap() via file reference only.
         * We'll deny the ->mmap() if the shm segment was since removed, but to
         * detect shm ID reuse we need to compare the file pointers.
         */
        base = get_file(shp->shm_file);
        shp->shm_nattch++;
        size = i_size_read(file_inode(base));
        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd) {
                fput(base);
                goto out_nattch;
        }

        file = alloc_file_clone(base, f_flags,
                          is_file_hugepages(base) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
        err = PTR_ERR(file);
        if (IS_ERR(file)) {
                kfree(sfd);
                fput(base);
                goto out_nattch;
        }

        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = base;
        sfd->vm_ops = NULL;
        file->private_data = sfd;

        err = security_mmap_file(file, prot, flags);
        if (err)
                goto out_fput;

        if (mmap_write_lock_killable(current->mm)) {
                err = -EINTR;
                goto out_fput;
        }

        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (addr + size < addr)
                        goto invalid;

                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
        }

        addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
        *raddr = addr;
        err = 0;
        if (IS_ERR_VALUE(addr))
                err = (long)addr;
invalid:
        mmap_write_unlock(current->mm);
        if (populate)
                mm_populate(addr, populate);

out_fput:
        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rwsem);
        shp = shm_lock(ns, shmid);
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rwsem);
        return err;

out_unlock:
        rcu_read_unlock();
out:
        return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}

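/*
 * The attach address is returned as the syscall's return value, so
 * force_successful_syscall_return() is used on architectures that would
 * otherwise mistake a large address for an error code.
 */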
#ifdef CONFIG_COMPAT

#ifndef COMPAT_SHMLBA
#define COMPAT_SHMLBA   SHMLBA
#endif

COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}
#endif

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
long ksys_shmdt(char __user *shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)shmaddr;
        int retval = -EINVAL;
#ifdef CONFIG_MMU
        loff_t size = 0;
        struct file *file;
        struct vm_area_struct *next;
#endif

        if (addr & ~PAGE_MASK)
                return retval;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records it's size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size and that are from the
         *   same shm segment from which we determined the size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise it starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        /*
                         * Record the file of the shm segment being
                         * unmapped.  With mremap(), someone could place
                         * page from another segment but with equal offsets
                         * in the range we are unmapping.
                         */
                        file = vma->vm_file;
                        size = i_size_read(file_inode(vma->vm_file));
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
                    (vma->vm_file == file))
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
                vma = next;
        }

#else   /* CONFIG_MMU */
        /* under NOMMU conditions, the exact address to be destroyed must be
         * given
         */
        if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
                retval = 0;
        }

#endif

        mmap_write_unlock(mm);
        return retval;
}

SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
        return ksys_shmdt(shmaddr);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
        struct user_namespace *user_ns = seq_user_ns(s);
        struct kern_ipc_perm *ipcp = it;
        struct shmid_kernel *shp;
        unsigned long rss = 0, swp = 0;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        seq_printf(s,
                   "%10d %10d %4o " SIZE_SPEC " %5u %5u "
                   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
                   SIZE_SPEC " " SIZE_SPEC "\n",
                   shp->shm_perm.key,
                   shp->shm_perm.id,
                   shp->shm_perm.mode,
                   shp->shm_segsz,
                   pid_nr_ns(shp->shm_cprid, pid_ns),
                   pid_nr_ns(shp->shm_lprid, pid_ns),
                   shp->shm_nattch,
                   from_kuid_munged(user_ns, shp->shm_perm.uid),
                   from_kgid_munged(user_ns, shp->shm_perm.gid),
                   from_kuid_munged(user_ns, shp->shm_perm.cuid),
                   from_kgid_munged(user_ns, shp->shm_perm.cgid),
                   shp->shm_atim,
                   shp->shm_dtim,
                   shp->shm_ctim,
                   rss * PAGE_SIZE,
                   swp * PAGE_SIZE);

        return 0;
}
#endif /* CONFIG_PROC_FS */