/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set
 * to STATE_READY with smp_store_release(), and it is read with READ_ONCE
 * followed by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe()
 * is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 * Thread A
 *				Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				wake_q_add(A)
 *				if (cmpxchg()) // success
 *				   ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *				get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *				Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;		// Access to stale data!
 *				receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */

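/*
 * Illustrative sketch (added commentary, not from the original source):
 * the pairing above in miniature, mirroring __pipelined_op() on the waker
 * side and wq_sleep() on the waiter side:
 *
 *	// waker, with info->lock held:
 *	get_task_struct(this->task);			// ref before publish
 *	smp_store_release(&this->state, STATE_READY);	// publish msg + state
 *	wake_q_add_safe(wake_q, task);			// consumes the ref
 *
 *	// waiter, woken without info->lock:
 *	if (READ_ONCE(ewp->state) == STATE_READY) {
 *		smp_acquire__after_ctrl_dep();		// pairs with release
 *		return ewp->msg;			// safe: not stale
 *	}
 */
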
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	u32 notify_self_exec_id;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache)
		kfree(leaf);
	else
		info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right.  On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list))
			msg_tree_erase(leaf, info);
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
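
		/*
		 * Worked example (added illustration, not from the original
		 * source): with mq_maxmsg = 10 and mq_msgsize = 8192, and
		 * assuming sizeof(struct msg_msg) == 48 and
		 * sizeof(struct posix_msg_tree_node) == 40 on a 64-bit build:
		 *
		 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 40
		 *		    = 880 bytes
		 *	mq_bytes    = 10 * 8192 + 880 = 82800 bytes
		 *
		 * which is the worst-case pinned memory charged against
		 * RLIMIT_MSGQUEUE below.
		 */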
		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;

	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	user = info->user;
	if (user) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}

	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
			 umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles a read() on a queue file.  To avoid implementing
 * some sort of mq_receive() here, only the queue size and notification
 * info can be read - the only values that are interesting from the user's
 * point of view and aren't accessible through the standard routines.
 */
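/*
 * Example (added illustration, not from the original source): reading a
 * queue file under the mqueue mount, e.g. "cat /dev/mqueue/myq", yields a
 * single line in the format built below, such as:
 *
 *	QSIZE:129      NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 */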
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
		   struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from getting
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Notification is invoked when a process is registered, no process
	 * is waiting synchronously for a message, AND the queue state has
	 * changed from empty to not empty. At this point we are sure that
	 * nobody is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL: {
			struct kernel_siginfo sig_i;
			struct task_struct *task;

			/* do_mq_notify() accepts sigev_signo == 0, why?? */
			if (!info->notify.sigev_signo)
				break;

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;

			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
						current_uid());
			/*
			 * We can't use kill_pid_info(), this signal should
			 * bypass check_kill_permission(). It is from kernel
			 * but si_fromuser() can't know this.
			 * We do check the self_exec_id, to avoid sending
			 * signals to programs that don't expect them.
			 */
			task = pid_task(info->notify_owner, PIDTYPE_TGID);
			if (task && task->self_exec_id ==
						info->notify_self_exec_id) {
				do_send_sig_info(info->notify.sigev_signo,
						&sig_i, task, PIDTYPE_TGID);
			}
			rcu_read_unlock();
			break;
		}
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				  mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/*
 * Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

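/*
 * Illustrative sketch (added commentary, not from the original source) of
 * the send-side decision described above, with error handling elided; it
 * mirrors the logic in do_mq_timedsend() below:
 *
 *	spin_lock(&info->lock);
 *	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg)
 *		wq_sleep(info, SEND, timeout, &wait);	// releases the lock
 *	else if ((receiver = wq_get_first_waiter(info, RECV)))
 *		pipelined_send(&wake_q, info, msg_ptr, receiver);
 *	else
 *		msg_insert(msg_ptr, info);		// ordinary enqueue
 *	spin_unlock(&info->lock);
 *	wake_up_q(&wake_q);				// wakeup after unlock
 */
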
static inline void __pipelined_op(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct ext_wait_queue *this)
{
	struct task_struct *task;

	list_del(&this->list);
	/*
	 * Take the task reference before publishing STATE_READY: once the
	 * release store is visible, the waiter may return and "this" (its
	 * stack frame) must no longer be dereferenced.
	 */
	task = get_task_struct(this->task);

	/* see MQ_BARRIER for purpose/pairing */
	smp_store_release(&this->state, STATE_READY);
	wake_q_add_safe(wake_q, task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	__pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (there is guaranteed to be
 * one free slot). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	__pipelined_op(wake_q, info, sender);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

/*
 * Note: if a process asks to deregister notification (by passing a NULL
 * pointer) but is not the current owner of the registration, the request
 * is silently discarded. POSIX does not explicitly define this case.
 */
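/*
 * Illustrative userspace usage (added example, not from the original
 * source), assuming "mqd" came from mq_open():
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	if (mq_notify(mqd, &sev) == -1)		// register for notification
 *		perror("mq_notify");
 *	...
 *	mq_notify(mqd, NULL);			// deregister again
 */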
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			info->notify_self_exec_id = current->self_exec_id;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags */
	compat_long_t mq_maxmsg;     /* maximum number of messages */
	compat_long_t mq_msgsize;    /* maximum message size */
	compat_long_t mq_curmsgs;    /* number of messages currently queued */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				   struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free = mqueue_fs_context_free,
	.get_tree = mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.init_fs_context = mqueue_init_fs_context,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);