/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};
#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
#define SEARCH_NUMBER		5

#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])

static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif
/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the (MSGMNI, IPCMNI/nr_ipc_ns) range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
	struct sysinfo i;
	unsigned long allowed;
	int nb_ns;

	si_meminfo(&i);
	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
		/ MSGMNB;
	nb_ns = atomic_read(&nr_ipc_ns);
	allowed /= nb_ns;

	if (allowed < MSGMNI) {
		ns->msg_ctlmni = MSGMNI;
		return;
	}

	if (allowed > IPCMNI / nb_ns) {
		ns->msg_ctlmni = IPCMNI / nb_ns;
		return;
	}

	ns->msg_ctlmni = allowed;
}
void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;

	recompute_msgmni(ns);

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}
#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif
void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);

	printk(KERN_INFO "msgmni has been set to %d\n",
		init_ipc_ns.msg_ctlmni);

	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}
static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}
static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}
static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}
static void msg_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct msg_queue *msq = ipc_rcu_to_struct(p);

	security_msg_queue_free(msq);
	ipc_rcu_free(head);
}
/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rwsem held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq, ipc_rcu_free);
		return retval;
	}

	/* ipc_addid() locks msq upon success. */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		ipc_rcu_putref(msq, msg_rcu_free);
		return id;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	return msq->q_perm.id;
}
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct msg_sender *mss, *t;

	list_for_each_entry_safe(mss, t, h, list) {
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}
static void expunge_all(struct msg_queue *msq, int res)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		msr->r_msg = NULL; /* initialize expunge ordering */
		wake_up_process(msr->r_tsk);
		/*
		 * Ensure that the wakeup is visible before setting r_msg as
		 * the receiving end depends on it: either spinning on a nil,
		 * or dealing with -EAGAIN cases. See lockless receive part 1
		 * and 2 in do_msgrcv().
		 */
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}
/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rwsem (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rwsem remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct msg_msg *msg, *t;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	ipc_rcu_putref(msq, msg_rcu_free);
}
/*
 * Called with msg_ids.rwsem and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}
SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
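/*
 * Userspace view of the above (illustrative, not from this file):
 *
 *	int id = msgget(key, IPC_CREAT | 0600);
 *
 * ipcget() routes a new key to newque() and an existing key through
 * msg_security() for the LSM associate check.
 */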
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid = tbuf_old.msg_perm.uid;
		out->msg_perm.gid = tbuf_old.msg_perm.gid;
		out->msg_perm.mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes = tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some msgctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds uninitialized_var(msqid64);
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	down_write(&msg_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
				      &msqid64.msg_perm, msqid64.msg_qbytes);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&msq->q_perm);
		/* freeque unlocks the ipc object and rcu */
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock1;
		}

		ipc_lock_object(&msq->q_perm);
		err = ipc_update_perm(&msqid64.msg_perm, ipcp);
		if (err)
			goto out_unlock0;

		msq->q_qbytes = msqid64.msg_qbytes;

		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&msg_ids(ns).rwsem);
	return err;
}
static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct msg_queue *msq;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;

		/*
		 * We must not return kernel stack data:
		 * due to padding, it's not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rwsem);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rwsem);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}

	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == MSG_STAT) {
			msq = msq_obtain_object(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = msq->q_perm.id;
		} else {
			msq = msq_obtain_object_check(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		rcu_read_unlock();

		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}

	default:
		return -EINVAL;
	}

	return err;
out_unlock:
	rcu_read_unlock();
	return err;
}
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	int version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
		return msgctl_nolock(ns, msqid, cmd, version, buf);
	case IPC_SET:
	case IPC_RMID:
		return msgctl_down(ns, msqid, cmd, buf, version);
	default:
		return -EINVAL;
	}
}
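/*
 * Illustrative userspace calls mirroring the dispatch above (not from
 * this file): IPC_STAT takes the read-mostly msgctl_nolock() path, while
 * IPC_SET and IPC_RMID go through msgctl_down():
 *
 *	struct msqid_ds ds;
 *	msgctl(id, IPC_STAT, &ds);
 *	ds.msg_qbytes = 32768;
 *	msgctl(id, IPC_SET, &ds);
 *	msgctl(id, IPC_RMID, NULL);
 */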
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
	case SEARCH_NUMBER:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				/* initialize pipelined send ordering */
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb(); /* see barrier comment below */
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				/*
				 * Ensure that the wakeup is visible before
				 * setting r_msg, as the receiving end depends
				 * on it. See lockless receive part 1 and 2 in
				 * do_msgrcv().
				 */
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}

	return 0;
}
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_unlock1;
	}

	ipc_lock_object(&msq->q_perm);

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock0;

		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock0;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock0;
		}

		/* enqueue the sender and prepare to block */
		ss_add(msq, &s);

		if (!ipc_rcu_getref(msq)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		rcu_read_lock();
		ipc_lock_object(&msq->q_perm);

		ipc_rcu_putref(msq, ipc_rcu_free);
		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock0;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
	if (msg != NULL)
		free_msg(msg);
	return err;
}
SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
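/*
 * Illustrative caller side (not from this file): mtype is fetched with
 * get_user() above and the payload is copied by load_msg(), so userspace
 * passes one contiguous buffer:
 *
 *	struct { long mtype; char mtext[64]; } m = { 1, "ping" };
 *	msgsnd(id, &m, sizeof(m.mtext), IPC_NOWAIT);
 */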
static inline int convert_mode(long *msgtyp, int msgflg)
{
	if (msgflg & MSG_COPY)
		return SEARCH_NUMBER;
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with lowest type that is <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
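/*
 * Resulting mapping, for reference:
 *	msgtyp == 0	-> SEARCH_ANY
 *	msgtyp > 0	-> SEARCH_EQUAL (SEARCH_NOTEQUAL with MSG_EXCEPT)
 *	msgtyp < 0	-> SEARCH_LESSEQUAL on abs(msgtyp)
 *	MSG_COPY set	-> SEARCH_NUMBER, msgtyp is a queue index
 */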
static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
	struct msgbuf __user *msgp = dest;
	size_t msgsz;

	if (put_user(msg->m_type, &msgp->mtype))
		return -EFAULT;

	msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
	if (store_msg(msgp->mtext, msg, msgsz))
		return -EFAULT;
	return msgsz;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * This function creates a new kernel message structure, large enough to store
 * bufsz message bytes.
 */
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	struct msg_msg *copy;

	/*
	 * Create dummy message to copy real message to.
	 */
	copy = load_msg(buf, bufsz);
	if (!IS_ERR(copy))
		copy->m_ts = bufsz;
	return copy;
}

static inline void free_copy(struct msg_msg *copy)
{
	if (copy)
		free_msg(copy);
}
#else
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	return ERR_PTR(-ENOSYS);
}

static inline void free_copy(struct msg_msg *copy)
{
}
#endif
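/*
 * MSG_COPY exists for checkpoint/restore style peeking: an illustrative
 * call (not from this file) reads the message at index n without
 * dequeueing it, and must combine the flag with IPC_NOWAIT:
 *
 *	msgrcv(id, &m, sizeof(m.mtext), n, MSG_COPY | IPC_NOWAIT);
 */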
static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
	struct msg_msg *msg, *found = NULL;
	long count = 0;

	list_for_each_entry(msg, &msq->q_messages, m_list) {
		if (testmsg(msg, *msgtyp, mode) &&
		    !security_msg_queue_msgrcv(msq, msg, current,
					       *msgtyp, mode)) {
			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
				*msgtyp = msg->m_type - 1;
				found = msg;
			} else if (mode == SEARCH_NUMBER) {
				if (*msgtyp == count)
					return msg;
			} else
				return msg;
			count++;
		}
	}

	return found ?: ERR_PTR(-EAGAIN);
}
long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
	       long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
	int mode;
	struct msg_queue *msq;
	struct ipc_namespace *ns;
	struct msg_msg *msg, *copy = NULL;

	ns = current->nsproxy->ipc_ns;

	if (msqid < 0 || (long) bufsz < 0)
		return -EINVAL;

	if (msgflg & MSG_COPY) {
		if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
			return -EINVAL;
		copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
		if (IS_ERR(copy))
			return PTR_ERR(copy);
	}
	mode = convert_mode(&msgtyp, msgflg);

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		rcu_read_unlock();
		free_copy(copy);
		return PTR_ERR(msq);
	}

	for (;;) {
		struct msg_receiver msr_d;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock1;

		ipc_lock_object(&msq->q_perm);

		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			msg = ERR_PTR(-EIDRM);
			goto out_unlock0;
		}

		msg = find_msg(msq, &msgtyp, mode);
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock0;
			}
			/*
			 * If we are copying, then do not unlink message and do
			 * not update queue parameters.
			 */
			if (msgflg & MSG_COPY) {
				msg = copy_msg(msg, copy);
				goto out_unlock0;
			}

			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);

			goto out_unlock0;
		}

		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock0;
		}

		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = bufsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and acquiring the q_perm.lock in ipc_lock_object().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock1;

		/* Lockless receive, part 3:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_object(&msq->q_perm);

		/* Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock0;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
	if (IS_ERR(msg)) {
		free_copy(copy);
		return PTR_ERR(msg);
	}

	bufsz = msg_handler(buf, msg, bufsz);
	free_msg(msg);

	return bufsz;
}
SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
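/*
 * Illustrative caller side of do_msg_fill()'s buffer layout (not from
 * this file):
 *
 *	struct { long mtype; char mtext[64]; } m;
 *	ssize_t n = msgrcv(id, &m, sizeof(m.mtext), 0, 0);
 *
 * On success n is the number of payload bytes stored in mtext.
 */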
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			from_kuid_munged(user_ns, msq->q_perm.uid),
			from_kgid_munged(user_ns, msq->q_perm.gid),
			from_kuid_munged(user_ns, msq->q_perm.cuid),
			from_kgid_munged(user_ns, msq->q_perm.cgid),
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif