/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"
#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1
struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};
struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;
static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}
static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}
/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to
		 * the right.  On receive, we want the highest priorities
		 * first, so walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
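/*
 * Illustrative userspace sketch (not kernel code): the rbtree walk above is
 * what makes higher mq_send() priorities come out of mq_receive() first.
 * The queue name and attributes below are made up; link with -lrt.  The
 * final printf prints "high 5", since priority 5 beats priority 1.
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
 *		char buf[64];
 *		unsigned int prio;
 *		mqd_t q = mq_open("/demo", O_RDWR | O_CREAT, 0600, &attr);
 *
 *		mq_send(q, "low", 4, 1);
 *		mq_send(q, "high", 5, 5);
 *		mq_receive(q, buf, sizeof(buf), &prio);
 *		printf("%s %u\n", buf, prio);
 *		mq_close(q);
 *		mq_unlink("/demo");
 *		return 0;
 *	}
 */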
static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
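/*
 * Worked example of the accounting above (struct sizes are illustrative;
 * the real sizeof values depend on architecture and config): with
 * mq_maxmsg = 10, mq_msgsize = 8192, sizeof(struct msg_msg) = 48 and
 * sizeof(struct posix_msg_tree_node) = 56,
 *
 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 56
 *		    = 480 + 560 = 1040 bytes
 *	mq_bytes    = 10 * 8192 + 1040 = 82960 bytes
 *
 * and those 82960 bytes of pinned memory are charged against the creating
 * user's RLIMIT_MSGQUEUE.
 */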
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}
static struct file_system_type mqueue_fs_type;
/*
 * Return value is pinned only by reference in ->mq_mnt; it will
 * live until ipcns dies.  Caller does not need to drop it.
 */
static struct vfsmount *mq_internal_mount(void)
{
	struct ipc_namespace *ns = current->nsproxy->ipc_ns;
	struct vfsmount *m = ns->mq_mnt;
	if (m)
		return m;
	m = kern_mount_data(&mqueue_fs_type, ns);
	spin_lock(&mq_lock);
	if (unlikely(ns->mq_mnt)) {
		spin_unlock(&mq_lock);
		if (!IS_ERR(m))
			kern_unmount(m);
		return ns->mq_mnt;
	}
	if (!IS_ERR(m))
		ns->mq_mnt = m;
	spin_unlock(&mq_lock);
	return m;
}
static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	struct vfsmount *m;

	if (flags & SB_KERNMOUNT)
		return mount_nodev(fs_type, flags, data, mqueue_fill_super);
	m = mq_internal_mount();
	if (IS_ERR(m))
		return ERR_CAST(m);
	atomic_inc(&m->mnt_sb->s_active);
	down_write(&m->mnt_sb->s_umount);
	return dget(m->mnt_root);
}
static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}
static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}
static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}
static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}
static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		free_msg(msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}
static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
			 umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
/*
 * This is routine for system read from queue file.
 * To avoid mess with doing here some sort of mq_receive we allow
 * to read only queue size & notification info (the only values
 * that are interesting from user point of view and aren't accessible
 * through std routines)
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}
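/*
 * Illustrative userspace sketch (not kernel code): with the mqueue
 * filesystem mounted at /dev/mqueue, the read method above can be
 * exercised with plain open()/read() on a queue's file.  "/demo" is an
 * assumed, pre-existing queue; the output looks like
 * "QSIZE:0          NOTIFY:0     SIGNO:0     NOTIFY_PID:0".
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[80];
 *		int fd = open("/dev/mqueue/demo", O_RDONLY);
 *		ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			fputs(buf, stdout);
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */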
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}
static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
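/*
 * Illustrative userspace sketch (not kernel code): because a mqd_t is a
 * real file descriptor on Linux, the poll method above lets a queue be
 * multiplexed alongside other fds.  "/demo" is an assumed, pre-existing
 * queue; POLLIN here means a following mq_receive() will not block.
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *	#include <poll.h>
 *
 *	int wait_for_message(void)
 *	{
 *		mqd_t q = mq_open("/demo", O_RDONLY);
 *		struct pollfd pfd = { .fd = (int)q, .events = POLLIN };
 *
 *		return poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN);
 *	}
 */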
/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}
/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}
/*
 * The next function is only to split too long sys_mq_timedsend
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is registered process and there isn't process
	 * waiting synchronously for message AND state of queue changed from
	 * empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}
static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}
static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}
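/*
 * Illustrative userspace sketch (not kernel code): registering for the
 * SIGEV_SIGNAL path handled by __do_notify() above.  In the handler,
 * si->si_code is SI_MESGQ and si->si_value carries sigev_value.  The
 * notification fires once, when the queue goes from empty to non-empty,
 * and the registration is then dropped, so it must be re-armed after
 * each wakeup.  Error handling omitted; q is an open mqd_t.
 *
 *	#include <mqueue.h>
 *	#include <signal.h>
 *
 *	static void on_msg(int sig, siginfo_t *si, void *uc)
 *	{
 *		(void)sig; (void)si; (void)uc;
 *	}
 *
 *	void register_notify(mqd_t q)
 *	{
 *		struct sigaction sa = { .sa_sigaction = on_msg,
 *					.sa_flags = SA_SIGINFO };
 *		struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *					.sigev_signo = SIGUSR1 };
 *
 *		sigaction(SIGUSR1, &sa, NULL);
 *		mq_notify(q, &sev);
 *	}
 */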
static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				  mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(d_inode(dentry), acc);
}
static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = mq_internal_mount();
	struct dentry *root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	root = mnt->mnt_root;
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
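/*
 * Illustrative userspace sketch (not kernel code): the syscall above backs
 * mq_open(3).  A typical create-exclusive call; the name and attributes
 * are made up, and a failed open returns (mqd_t)-1 with errno set.
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 128 };
 *	mqd_t q = mq_open("/demo", O_CREAT | O_EXCL | O_RDWR, 0600, &attr);
 */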
SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	if (!mnt)
		return -ENOENT;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}
/*
 * Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */
/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add such
	 * that we can ensure that updating receiver->state is the last
	 * write operation: As once set, the receiver can continue,
	 * and if we don't have the reference count from the wake_q,
	 * yet, at that point we can later have a use-after-free
	 * condition and bogus wakeup.
	 */
	receiver->state = STATE_READY;
}
/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
 * gets its message and put to the queue (we have one free place for sure). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);
	wake_q_add(wake_q, sender->task);
	sender->state = STATE_READY;
}
static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}
static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}
SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
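/*
 * Illustrative userspace sketch (not kernel code): the timeout taken by
 * mq_timedsend(3)/mq_timedreceive(3) is an absolute CLOCK_REALTIME time,
 * matching the CLOCK_REALTIME/HRTIMER_MODE_ABS sleep in wq_sleep() above.
 * A receive that waits at most five seconds; q is an open mqd_t, buf is
 * at least mq_msgsize bytes, errno needs <errno.h>, and handle_timeout()
 * is a placeholder.
 *
 *	#include <mqueue.h>
 *	#include <time.h>
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;
 *	if (mq_timedreceive(q, buf, buflen, NULL, &ts) < 0 &&
 *	    errno == ETIMEDOUT)
 *		handle_timeout();
 */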
/*
 * Notes: the case when user wants us to deregister (with NULL as pointer)
 * and he isn't currently owner of notification, will be silently discarded.
 * It isn't explicitly defined in the POSIX.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}
static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}
SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}
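/*
 * Illustrative userspace sketch (not kernel code): mq_getattr(3) and
 * mq_setattr(3) both land in do_mq_getsetattr() above; only O_NONBLOCK
 * in mq_flags is mutable.  Toggling non-blocking mode on an open
 * descriptor q, with the previous attributes returned in old:
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr cur, old;
 *
 *	mq_getattr(q, &cur);
 *	cur.mq_flags |= O_NONBLOCK;
 *	mq_setattr(q, &cur, &old);
 */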
#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags		      */
	compat_long_t mq_maxmsg;     /* maximum number of messages	      */
	compat_long_t mq_msgsize;    /* maximum message size		      */
	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};
static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}
static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}
COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}
static int compat_prepare_timeout(const struct compat_timespec __user *p,
				   struct timespec64 *ts)
{
	if (compat_get_timespec64(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}
COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
		       const char __user *, u_msg_ptr,
		       compat_size_t, msg_len, unsigned int, msg_prio,
		       const struct compat_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}
COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
		       char __user *, u_msg_ptr,
		       compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
		       const struct compat_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}
COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif
static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};
static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};
static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};
static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};
int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;
	ns->mq_mnt = NULL;

	return 0;
}
void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	if (ns->mq_mnt)
		ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}
void mq_put_mnt(struct ipc_namespace *ns)
{
	if (ns->mq_mnt)
		kern_unmount(ns->mq_mnt);
}
static int __init init_mqueue_fs(void)
{
	struct vfsmount *m;
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	m = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
	if (IS_ERR(m)) {
		error = PTR_ERR(m);
		goto out_filesystem;
	}
	init_ipc_ns.mq_mnt = m;
	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);