/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"
struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};
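
/*
 * Note: shm_file_data() expands to an lvalue (the cast of
 * &file->private_data is dereferenced), so the same macro is used both to
 * read the per-attach data and to assign it (see shm_release() below).
 */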
#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_rmid_forced = 0;
        ns->shm_tot = 0;
        ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;
        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
        shm_init_ns(&init_ipc_ns);
        return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
        ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
                                "       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
        rcu_read_lock();
        spin_lock(&ipcp->shm_perm.lock);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
                user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
                                shp->mlock_user);
        fput(shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        return (shp->shm_nattch == 0) &&
               (ns->shm_rmid_forced ||
                (shp->shm_perm.mode & SHM_DEST));
}
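
/*
 * For reference, case 2) above corresponds to the kernel.shm_rmid_forced
 * sysctl. An administrator enables it with either of:
 *
 *	echo 1 > /proc/sys/kernel/shm_rmid_forced
 *	sysctl -w kernel.shm_rmid_forced=1
 *
 * Once set, a segment is torn down as soon as its last attach goes away,
 * even if IPC_RMID was never requested for it.
 */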

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rw_mutex);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_creator != current)
                return 0;

        /*
         * Mark it as orphaned so the segment is destroyed when
         * kernel.shm_rmid_forced is changed. This is a no-op if the
         * following shm_may_destroy() returns true.
         */
        shp->shm_creator = NULL;

        /*
         * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
         * is not set, it shouldn't be deleted here.
         */
        if (!ns->shm_rmid_forced)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        /*
         * We want to destroy segments that have no users and whose
         * originating process has already exited.
         *
         * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
         */
        if (shp->shm_creator != NULL)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
        down_write(&shm_ids(ns).rw_mutex);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
        up_write(&shm_ids(ns).rw_mutex);
}

void exit_shm(struct task_struct *task)
{
        struct ipc_namespace *ns = task->nsproxy->ipc_ns;

        if (shm_ids(ns).in_use == 0)
                return;

        /* Destroy all segments that were created but are not mapped yet */
        down_write(&shm_ids(ns).rw_mutex);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
        up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        BUG_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);

        return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fsync)
                return -EINVAL;
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
                                                  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
        .mmap		= shm_mmap,
        .fsync		= shm_fsync,
        .release	= shm_release,
#ifndef CONFIG_MMU
        .get_unmapped_area	= shm_get_unmapped_area,
#endif
        .llseek		= noop_llseek,
};

static const struct file_operations shm_file_operations_huge = {
        .mmap		= shm_mmap,
        .fsync		= shm_fsync,
        .release	= shm_release,
        .get_unmapped_area	= shm_get_unmapped_area,
        .llseek		= noop_llseek,
};

int is_file_shm_hugepages(struct file *file)
{
        return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
        .open	= shm_open,	/* callback for a new vm-area open */
        .close	= shm_close,	/* callback for when the vm-area is released */
        .fault	= shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;
        vm_flags_t acctflag = 0;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                /* hugetlb_file_setup applies strict accounting */
                if (shmflg & SHM_NORESERVE)
                        acctflag = VM_NORESERVE;
                file = hugetlb_file_setup(name, size, acctflag,
                                        &shp->mlock_user, HUGETLB_SHMFS_INODE);
        } else {
                /*
                 * Do not allow no accounting for OVERCOMMIT_NEVER, even
                 * if it's asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;
        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;
        shm_unlock(shp);
        return error;

no_id:
        if (is_file_hugepages(file) && shp->mlock_user)
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops shm_ops;
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_ops.getnew = newseg;
        shm_ops.associate = shm_security;
        shm_ops.more_checks = shm_more_checks;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
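
/*
 * Illustrative userspace sketch (not part of this file) of the shmget()
 * path above; the key, size and mode are arbitrary and error handling is
 * minimal:
 *
 *	#include <stdio.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int main(void)
 *	{
 *		int id = shmget(ftok("/tmp", 'A'), 4096, IPC_CREAT | 0600);
 *
 *		if (id < 0)
 *			perror("shmget");	// EINVAL, ENOSPC, ENOMEM, ...
 *		else
 *			printf("shmid = %d\n", id);
 *		return 0;
 *	}
 */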

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz	= in->shm_segsz;
                out.shm_atime	= in->shm_atime;
                out.shm_dtime	= in->shm_dtime;
                out.shm_ctime	= in->shm_ctime;
                out.shm_cpid	= in->shm_cpid;
                out.shm_lpid	= in->shm_lpid;
                out.shm_nattch	= in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid	= tbuf_old.shm_perm.uid;
                out->shm_perm.gid	= tbuf_old.shm_perm.gid;
                out->shm_perm.mode	= tbuf_old.shm_perm.mode;

                return 0;
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin	= in->shmmin;
                out.shmmni	= in->shmmni;
                out.shmseg	= in->shmseg;
                out.shmall	= in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader.
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
        unsigned long *rss_add, unsigned long *swp_add)
{
        struct inode *inode;

        inode = shp->shm_file->f_path.dentry->d_inode;

        if (is_file_hugepages(shp->shm_file)) {
                struct address_space *mapping = inode->i_mapping;
                struct hstate *h = hstate_file(shp->shm_file);
                *rss_add += pages_per_huge_page(h) * mapping->nrpages;
        } else {
#ifdef CONFIG_SHMEM
                struct shmem_inode_info *info = SHMEM_I(inode);
                spin_lock(&info->lock);
                *rss_add += inode->i_mapping->nrpages;
                *swp_add += info->swapped;
                spin_unlock(&info->lock);
#else
                *rss_add += inode->i_mapping->nrpages;
#endif
        }
}

/*
 * Called with shm_ids.rw_mutex held as a reader.
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;

                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);

                shm_add_rss_swap(shp, rss, swp);

                total++;
        }
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct shmid64_ds shmid64;
        struct shmid_kernel *shp;
        int err;

        if (cmd == IPC_SET) {
                if (copy_shmid_from_user(&shmid64, buf, version))
                        return -EFAULT;
        }

        ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
                               &shmid64.shm_perm, 0);
        if (IS_ERR(ipcp))
                return PTR_ERR(ipcp);

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(shp, cmd);
        if (err)
                goto out_unlock;
        switch (cmd) {
        case IPC_RMID:
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipc_update_perm(&shmid64.shm_perm, ipcp);
                shp->shm_ctim = get_seconds();
                break;
        default:
                err = -EINVAL;
        }
out_unlock:
        shm_unlock(shp);
out_up:
        up_write(&shm_ids(ns).rw_mutex);
        return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rw_mutex);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rw_mutex);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                if (cmd == SHM_STAT) {
                        shp = shm_lock(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_lock_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz	= shp->shm_segsz;
                tbuf.shm_atime	= shp->shm_atim;
                tbuf.shm_dtime	= shp->shm_dtim;
                tbuf.shm_ctime	= shp->shm_ctim;
                tbuf.shm_cpid	= shp->shm_cprid;
                tbuf.shm_lpid	= shp->shm_lprid;
                tbuf.shm_nattch	= shp->shm_nattch;
                shm_unlock(shp);
                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                struct file *uninitialized_var(shm_file);

                lru_add_drain_all();  /* drain pagevecs to lru lists */

                shp = shm_lock_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out;
                }

                audit_ipc_obj(&(shp->shm_perm));

                if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
                        uid_t euid = current_euid();
                        err = -EPERM;
                        if (euid != shp->shm_perm.uid &&
                            euid != shp->shm_perm.cuid)
                                goto out_unlock;
                        if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current_user();
                        if (!is_file_hugepages(shp->shm_file)) {
                                err = shmem_lock(shp->shm_file, 1, user);
                                if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                        shp->shm_perm.mode |= SHM_LOCKED;
                                        shp->mlock_user = user;
                                }
                        }
                } else if (!is_file_hugepages(shp->shm_file)) {
                        shmem_lock(shp->shm_file, 0, shp->mlock_user);
                        shp->shm_perm.mode &= ~SHM_LOCKED;
                        shp->mlock_user = NULL;
                }
                shm_unlock(shp);
                goto out;
        }
        case IPC_RMID:
        case IPC_SET:
                err = shmctl_down(ns, shmid, cmd, buf, version);
                return err;
        default:
                return -EINVAL;
        }

out_unlock:
        shm_unlock(shp);
out:
        return err;
}
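
/*
 * Illustrative userspace sketch (not part of this file) of the IPC_STAT
 * and IPC_RMID paths handled above:
 *
 *	#include <stdio.h>
 *	#include <sys/shm.h>
 *
 *	struct shmid_ds ds;
 *
 *	if (shmctl(id, IPC_STAT, &ds) == 0)	// served by the STAT case
 *		printf("size=%zu nattch=%lu\n",
 *		       (size_t)ds.shm_segsz, (unsigned long)ds.shm_nattch);
 *	shmctl(id, IPC_RMID, NULL);	// via shmctl_down(); the segment is
 *					// marked SHM_DEST and freed once
 *					// shm_nattch drops to zero
 */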

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        unsigned long user_addr;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        fmode_t f_mode;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (SHMLBA - 1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(SHMLBA - 1);	   /* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        shp = shm_lock_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out;
        }

        err = -EACCES;
        if (ipcperms(ns, &shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        path = shp->shm_file->f_path;
        path_get(&path);
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        shm_unlock(shp);

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd)
                goto out_put_dentry;

        file = alloc_file(&path, f_mode,
                          is_file_hugepages(shp->shm_file) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
        if (!file)
                goto out_free;

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = do_mmap(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
                err = (long)user_addr;
invalid:
        up_write(&current->mm->mmap_sem);

        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock(ns, shmid);
        BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);

out:
        return err;

out_unlock:
        shm_unlock(shp);
        goto out;

out_free:
        kfree(sfd);
out_put_dentry:
        path_put(&path);
        goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}
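
/*
 * Illustrative userspace sketch (not part of this file) of the attach path:
 *
 *	#include <sys/shm.h>
 *
 *	void *p = shmat(id, NULL, 0);		// kernel chooses the address
 *	void *r = shmat(id, NULL, SHM_RDONLY);	// read-only mapping
 *
 *	if (p == (void *)-1)
 *		perror("shmat");	// e.g. EACCES from the ipcperms() check
 */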

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)shmaddr;
        int retval = -EINVAL;
#ifdef CONFIG_MMU
        loff_t size = 0;
        struct vm_area_struct *next;
#endif

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         *
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        size = vma->vm_file->f_path.dentry->d_inode->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

#else /* CONFIG_MMU */
        /*
         * Under NOMMU conditions, the exact address to be destroyed must be
         * given.
         */
        retval = -EINVAL;
        if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                retval = 0;
        }

#endif

        up_write(&mm->mmap_sem);
        return retval;
}
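
/*
 * Illustrative userspace counterpart (not part of this file): detaching is
 * shmdt() on an address previously returned by shmat(); shm_close() then
 * drops shm_nattch and destroys the segment if shm_may_destroy() says so:
 *
 *	if (shmdt(p) != 0)
 *		perror("shmdt");	// EINVAL if p was never attached
 */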

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct shmid_kernel *shp = it;
        unsigned long rss = 0, swp = 0;

        shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        return seq_printf(s,
                          "%10d %10d %4o " SIZE_SPEC " %5u %5u "
                          "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
                          SIZE_SPEC " " SIZE_SPEC "\n",
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
                          shp->shm_segsz,
                          shp->shm_cprid,
                          shp->shm_lprid,
                          shp->shm_nattch,
                          shp->shm_perm.uid,
                          shp->shm_perm.gid,
                          shp->shm_perm.cuid,
                          shp->shm_perm.cgid,
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim,
                          rss * PAGE_SIZE,
                          swp * PAGE_SIZE);
}
#endif