/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"
struct mqueue_fs_context {
        struct ipc_namespace    *ipc_ns;
};

#define MQUEUE_MAGIC    0x19800202
#define DIRENT_SIZE     20
#define FILENT_SIZE     80

#define SEND            0
#define RECV            1

#define STATE_NONE      0
#define STATE_READY     1
struct posix_msg_tree_node {
        struct rb_node          rb_node;
        struct list_head        msg_list;
        int                     priority;
};
/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set
 * to STATE_READY with smp_store_release(), and it is read with READ_ONCE
 * followed by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe()
 * is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens:
 *
 * Thread A                             Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *                                      wake_q_add(A)
 *                                      if (cmpxchg()) // success
 *                                         ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *                                      get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data:
 *
 * Thread A                             Thread B
 * do_mq_timedreceive()
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *                                      state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;  // Access to stale data!
 *                                      receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */
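/*
 * A minimal sketch of the pairing described above (illustrative only; it
 * mirrors __pipelined_op()/wq_sleep() further below, using their names):
 *
 *	// waker, with info->lock held:
 *	this->msg = message;				// publish data
 *	get_task_struct(this->task);			// pin task first, see 1)
 *	smp_store_release(&this->state, STATE_READY);	// then release state
 *	wake_q_add_safe(wake_q, this->task);
 *
 *	// waiter, after schedule_hrtimeout() returns, no lock held:
 *	if (READ_ONCE(wait.state) == STATE_READY) {
 *		smp_acquire__after_ctrl_dep();		// pairs with release
 *		msg = wait.msg;				// cannot be stale, see 2)
 *	}
 */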
struct ext_wait_queue {         /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
        struct msg_msg *msg;    /* ptr of loaded message */
        int state;              /* one of STATE_* values */
};
struct mqueue_inode_info {
        spinlock_t lock;
        struct inode vfs_inode;
        wait_queue_head_t wait_q;

        struct rb_root msg_tree;
        struct rb_node *msg_tree_rightmost;
        struct posix_msg_tree_node *node_cache;
        struct mq_attr attr;

        struct sigevent notify;
        struct pid *notify_owner;
        struct user_namespace *notify_user_ns;
        struct user_struct *user;       /* user who created, for accounting */
        struct sock *notify_sock;
        struct sk_buff *notify_cookie;

        /* for tasks waiting for free space and messages, respectively */
        struct ext_wait_queue e_wait_q[2];

        unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};
static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;
static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
        return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
        return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
        struct ipc_namespace *ns;

        spin_lock(&mq_lock);
        ns = __get_ns_from_inode(inode);
        spin_unlock(&mq_lock);

        return ns;
}
/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
        struct rb_node **p, *parent = NULL;
        struct posix_msg_tree_node *leaf;
        bool rightmost = true;

        p = &info->msg_tree.rb_node;
        while (*p) {
                parent = *p;
                leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

                if (likely(leaf->priority == msg->m_type))
                        goto insert_msg;
                else if (msg->m_type < leaf->priority) {
                        p = &(*p)->rb_left;
                        rightmost = false;
                } else
                        p = &(*p)->rb_right;
        }
        if (info->node_cache) {
                leaf = info->node_cache;
                info->node_cache = NULL;
        } else {
                leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
                if (!leaf)
                        return -ENOMEM;
                INIT_LIST_HEAD(&leaf->msg_list);
        }
        leaf->priority = msg->m_type;

        if (rightmost)
                info->msg_tree_rightmost = &leaf->rb_node;

        rb_link_node(&leaf->rb_node, parent, p);
        rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
        info->attr.mq_curmsgs++;
        info->qsize += msg->m_ts;
        list_add_tail(&msg->m_list, &leaf->msg_list);
        return 0;
}
static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
                                  struct mqueue_inode_info *info)
{
        struct rb_node *node = &leaf->rb_node;

        if (info->msg_tree_rightmost == node)
                info->msg_tree_rightmost = rb_prev(node);

        rb_erase(node, &info->msg_tree);
        if (info->node_cache)
                kfree(leaf);
        else
                info->node_cache = leaf;
}
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
        struct rb_node *parent = NULL;
        struct posix_msg_tree_node *leaf;
        struct msg_msg *msg;

try_again:
        /*
         * During insert, low priorities go to the left and high to the
         * right.  On receive, we want the highest priorities first, so
         * walk all the way to the right.
         */
        parent = info->msg_tree_rightmost;
        if (!parent) {
                if (info->attr.mq_curmsgs) {
                        pr_warn_once("Inconsistency in POSIX message queue, "
                                     "no tree element, but supposedly messages "
                                     "should exist!\n");
                        info->attr.mq_curmsgs = 0;
                }
                return NULL;
        }
        leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
        if (unlikely(list_empty(&leaf->msg_list))) {
                pr_warn_once("Inconsistency in POSIX message queue, "
                             "empty leaf node but we haven't implemented "
                             "lazy leaf delete!\n");
                msg_tree_erase(leaf, info);
                goto try_again;
        } else {
                msg = list_first_entry(&leaf->msg_list,
                                       struct msg_msg, m_list);
                list_del(&msg->m_list);
                if (list_empty(&leaf->msg_list)) {
                        msg_tree_erase(leaf, info);
                }
        }
        info->attr.mq_curmsgs--;
        info->qsize -= msg->m_ts;
        return msg;
}
static struct inode *mqueue_get_inode(struct super_block *sb,
                struct ipc_namespace *ipc_ns, umode_t mode,
                struct mq_attr *attr)
{
        struct user_struct *u = current_user();
        struct inode *inode;
        int ret = -ENOMEM;

        inode = new_inode(sb);
        if (!inode)
                goto err;

        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

        if (S_ISREG(mode)) {
                struct mqueue_inode_info *info;
                unsigned long mq_bytes, mq_treesize;

                inode->i_fop = &mqueue_file_operations;
                inode->i_size = FILENT_SIZE;
                /* mqueue specific info */
                info = MQUEUE_I(inode);
                spin_lock_init(&info->lock);
                init_waitqueue_head(&info->wait_q);
                INIT_LIST_HEAD(&info->e_wait_q[0].list);
                INIT_LIST_HEAD(&info->e_wait_q[1].list);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
                info->qsize = 0;
                info->user = NULL;      /* set when all is ok */
                info->msg_tree = RB_ROOT;
                info->msg_tree_rightmost = NULL;
                info->node_cache = NULL;
                memset(&info->attr, 0, sizeof(info->attr));
                info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
                                           ipc_ns->mq_msg_default);
                info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
                                            ipc_ns->mq_msgsize_default);
                if (attr) {
                        info->attr.mq_maxmsg = attr->mq_maxmsg;
                        info->attr.mq_msgsize = attr->mq_msgsize;
                }
                /*
                 * We used to allocate a static array of pointers and account
                 * the size of that array as well as one msg_msg struct per
                 * possible message into the queue size. That's no longer
                 * accurate as the queue is now an rbtree and will grow and
                 * shrink depending on usage patterns.  We can, however, still
                 * account one msg_msg struct per message, but the nodes are
                 * allocated depending on priority usage, and most programs
                 * only use one, or a handful, of priorities.  However, since
                 * this is pinned memory, we need to assume worst case, so
                 * that means the min(mq_maxmsg, max_priorities) * struct
                 * posix_msg_tree_node.
                 */
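                /*
                 * Worked example of the accounting below (illustrative; the
                 * struct sizes are assumptions and vary by arch/config):
                 * with mq_maxmsg = 10, mq_msgsize = 8192, and assuming
                 * sizeof(struct msg_msg) == 48 and
                 * sizeof(struct posix_msg_tree_node) == 40:
                 *
                 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 40
                 *		    = 880 bytes
                 *	mq_bytes    = 10 * 8192 + 880 = 82800 bytes
                 *
                 * The payload dominates; the worst-case tree nodes add only
                 * a small overhead, all charged against RLIMIT_MSGQUEUE.
                 */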
                ret = -EINVAL;
                if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
                        goto out_inode;
                if (capable(CAP_SYS_RESOURCE)) {
                        if (info->attr.mq_maxmsg > HARD_MSGMAX ||
                            info->attr.mq_msgsize > HARD_MSGSIZEMAX)
                                goto out_inode;
                } else {
                        if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
                            info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
                                goto out_inode;
                }
                ret = -EOVERFLOW;
                /* check for overflow */
                if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
                        goto out_inode;
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);
                mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
                if (mq_bytes + mq_treesize < mq_bytes)
                        goto out_inode;
                mq_bytes += mq_treesize;
                spin_lock(&mq_lock);
                if (u->mq_bytes + mq_bytes < u->mq_bytes ||
                    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
                        spin_unlock(&mq_lock);
                        /* mqueue_evict_inode() releases info->messages */
                        ret = -EMFILE;
                        goto out_inode;
                }
                u->mq_bytes += mq_bytes;
                spin_unlock(&mq_lock);

                /* all is ok */
                info->user = get_uid(u);
        } else if (S_ISDIR(mode)) {
                inc_nlink(inode);
                /* Some things misbehave if size == 0 on a directory */
                inode->i_size = 2 * DIRENT_SIZE;
                inode->i_op = &mqueue_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
        }

        return inode;
out_inode:
        iput(inode);
err:
        return ERR_PTR(ret);
}
static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct inode *inode;
        struct ipc_namespace *ns = sb->s_fs_info;

        sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = MQUEUE_MAGIC;
        sb->s_op = &mqueue_super_ops;

        inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}
static int mqueue_get_tree(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx = fc->fs_private;

        return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}
static void mqueue_fs_context_free(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx = fc->fs_private;

        put_ipc_ns(ctx->ipc_ns);
        kfree(ctx);
}
static int mqueue_init_fs_context(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx;

        ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
        put_user_ns(fc->user_ns);
        fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
        fc->fs_private = ctx;
        fc->ops = &mqueue_fs_context_ops;
        return 0;
}
static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
        struct mqueue_fs_context *ctx;
        struct fs_context *fc;
        struct vfsmount *mnt;

        fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
        if (IS_ERR(fc))
                return ERR_CAST(fc);

        ctx = fc->fs_private;
        put_ipc_ns(ctx->ipc_ns);
        ctx->ipc_ns = get_ipc_ns(ns);
        put_user_ns(fc->user_ns);
        fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

        mnt = fc_mount(fc);
        put_fs_context(fc);
        return mnt;
}
static void init_once(void *foo)
{
        struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

        inode_init_once(&p->vfs_inode);
}
static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
        struct mqueue_inode_info *ei;

        ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}
static void mqueue_free_inode(struct inode *inode)
{
        kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}
static void mqueue_evict_inode(struct inode *inode)
{
        struct mqueue_inode_info *info;
        struct user_struct *user;
        struct ipc_namespace *ipc_ns;
        struct msg_msg *msg, *nmsg;
        LIST_HEAD(tmp_msg);

        clear_inode(inode);

        if (S_ISDIR(inode->i_mode))
                return;

        ipc_ns = get_ns_from_inode(inode);
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
        while ((msg = msg_get(info)) != NULL)
                list_add_tail(&msg->m_list, &tmp_msg);
        kfree(info->node_cache);
        spin_unlock(&info->lock);

        list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
                list_del(&msg->m_list);
                free_msg(msg);
        }

        user = info->user;
        if (user) {
                unsigned long mq_bytes, mq_treesize;

                /* Total amount of bytes accounted for the mqueue */
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);

                mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
                                          info->attr.mq_msgsize);

                spin_lock(&mq_lock);
                user->mq_bytes -= mq_bytes;
                /*
                 * get_ns_from_inode() ensures that the
                 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
                 * to which we now hold a reference, or it is NULL.
                 * We can't put it here under mq_lock, though.
                 */
                if (ipc_ns)
                        ipc_ns->mq_queues_count--;
                spin_unlock(&mq_lock);
                free_uid(user);
        }
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
}
static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
        struct inode *dir = dentry->d_parent->d_inode;
        struct inode *inode;
        struct mq_attr *attr = arg;
        int error;
        struct ipc_namespace *ipc_ns;

        spin_lock(&mq_lock);
        ipc_ns = __get_ns_from_inode(dir);
        if (!ipc_ns) {
                error = -EACCES;
                goto out_unlock;
        }

        if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
            !capable(CAP_SYS_RESOURCE)) {
                error = -ENOSPC;
                goto out_unlock;
        }
        ipc_ns->mq_queues_count++;
        spin_unlock(&mq_lock);

        inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
        if (IS_ERR(inode)) {
                error = PTR_ERR(inode);
                spin_lock(&mq_lock);
                ipc_ns->mq_queues_count--;
                goto out_unlock;
        }

        put_ipc_ns(ipc_ns);
        dir->i_size += DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
out_unlock:
        spin_unlock(&mq_lock);
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
        return error;
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
                                umode_t mode, bool excl)
{
        return mqueue_create_attr(dentry, mode, NULL);
}
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);

        dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
        dir->i_size -= DIRENT_SIZE;
        drop_nlink(inode);
        dput(dentry);
        return 0;
}
/*
 * This routine implements read() on a queue file.  To avoid emulating
 * some sort of mq_receive here, we allow reading only the queue size and
 * notification info (the only values that are interesting from the
 * user's point of view and aren't accessible through the standard
 * routines).
 */
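/*
 * Example (illustrative; the values depend on the queue's state): with a
 * 129-byte message queued and a SIGEV_THREAD notification registered,
 * reading /dev/mqueue/<name> might yield a line shaped like
 *
 *	QSIZE:129      NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 *
 * matching the snprintf() format string used below.
 */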
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
                                size_t count, loff_t *off)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        char buffer[FILENT_SIZE];
        ssize_t ret;

        spin_lock(&info->lock);
        snprintf(buffer, sizeof(buffer),
                        "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
                        info->qsize,
                        info->notify_owner ? info->notify.sigev_notify : 0,
                        (info->notify_owner &&
                         info->notify.sigev_notify == SIGEV_SIGNAL) ?
                                info->notify.sigev_signo : 0,
                        pid_vnr(info->notify_owner));
        spin_unlock(&info->lock);
        buffer[sizeof(buffer)-1] = '\0';

        ret = simple_read_from_buffer(u_data, count, off, buffer,
                                strlen(buffer));
        if (ret <= 0)
                return ret;

        file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
        return ret;
}
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

        spin_lock(&info->lock);
        if (task_tgid(current) == info->notify_owner)
                remove_notification(info);

        spin_unlock(&info->lock);
        return 0;
}
static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        __poll_t retval = 0;

        poll_wait(filp, &info->wait_q, poll_tab);

        spin_lock(&info->lock);
        if (info->attr.mq_curmsgs)
                retval = EPOLLIN | EPOLLRDNORM;

        if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
                retval |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock(&info->lock);

        return retval;
}
/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
                        struct ext_wait_queue *ewp)
{
        struct ext_wait_queue *walk;

        list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
                if (walk->task->prio <= current->prio) {
                        list_add_tail(&ewp->list, &walk->list);
                        return;
                }
        }
        list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
                    ktime_t *timeout, struct ext_wait_queue *ewp)
        __releases(&info->lock)
{
        int retval;
        signed long time;

        wq_add(info, sr, ewp);

        for (;;) {
                /* memory barrier not required, we hold info->lock */
                __set_current_state(TASK_INTERRUPTIBLE);

                spin_unlock(&info->lock);
                time = schedule_hrtimeout_range_clock(timeout, 0,
                        HRTIMER_MODE_ABS, CLOCK_REALTIME);

                if (READ_ONCE(ewp->state) == STATE_READY) {
                        /* see MQ_BARRIER for purpose/pairing */
                        smp_acquire__after_ctrl_dep();
                        retval = 0;
                        goto out;
                }
                spin_lock(&info->lock);

                /* we hold info->lock, so no memory barrier required */
                if (READ_ONCE(ewp->state) == STATE_READY) {
                        retval = 0;
                        goto out_unlock;
                }
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                if (time == 0) {
                        retval = -ETIMEDOUT;
                        break;
                }
        }
        list_del(&ewp->list);
out_unlock:
        spin_unlock(&info->lock);
out:
        return retval;
}
/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
                struct mqueue_inode_info *info, int sr)
{
        struct list_head *ptr;

        ptr = info->e_wait_q[sr].list.prev;
        if (ptr == &info->e_wait_q[sr].list)
                return NULL;
        return list_entry(ptr, struct ext_wait_queue, list);
}
static inline void set_cookie(struct sk_buff *skb, char code)
{
        ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}
/*
 * The next function exists only to split up the overly long
 * sys_mq_timedsend().
 */
static void __do_notify(struct mqueue_inode_info *info)
{
        /* notification
         * invoked when there is a registered process and there is no process
         * waiting synchronously for a message AND the state of the queue
         * changed from empty to not empty. Here we are sure that no one is
         * waiting synchronously. */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
                struct kernel_siginfo sig_i;
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
                case SIGEV_SIGNAL:
                        /* sends signal */

                        clear_siginfo(&sig_i);
                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
                        /* map current pid/uid into info->owner's namespaces */
                        rcu_read_lock();
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
                        sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
                                                current_uid());
                        rcu_read_unlock();

                        kill_pid_info(info->notify.sigev_signo,
                                      &sig_i, info->notify_owner);
                        break;
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
                        break;
                }
                /* after notification unregisters process */
                put_pid(info->notify_owner);
                put_user_ns(info->notify_user_ns);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
        }
        wake_up(&info->wait_q);
}
static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
                           struct timespec64 *ts)
{
        if (get_timespec64(ts, u_abs_timeout))
                return -EFAULT;
        if (!timespec64_valid(ts))
                return -EINVAL;
        return 0;
}
static void remove_notification(struct mqueue_inode_info *info)
{
        if (info->notify_owner != NULL &&
            info->notify.sigev_notify == SIGEV_THREAD) {
                set_cookie(info->notify_cookie, NOTIFY_REMOVED);
                netlink_sendskb(info->notify_sock, info->notify_cookie);
        }
        put_pid(info->notify_owner);
        put_user_ns(info->notify_user_ns);
        info->notify_owner = NULL;
        info->notify_user_ns = NULL;
}
static int prepare_open(struct dentry *dentry, int oflag, int ro,
                        umode_t mode, struct filename *name,
                        struct mq_attr *attr)
{
        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
                                                  MAY_READ | MAY_WRITE };
        int acc;

        if (d_really_is_negative(dentry)) {
                if (!(oflag & O_CREAT))
                        return -ENOENT;
                if (ro)
                        return ro;
                audit_inode_parent_hidden(name, dentry->d_parent);
                return vfs_mkobj(dentry, mode & ~current_umask(),
                                  mqueue_create_attr, attr);
        }
        /* it already existed */
        audit_inode(name, dentry, 0);
        if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
                return -EEXIST;
        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
                return -EINVAL;
        acc = oflag2acc[oflag & O_ACCMODE];
        return inode_permission(d_inode(dentry), acc);
}
static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
                      struct mq_attr *attr)
{
        struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
        struct dentry *root = mnt->mnt_root;
        struct filename *name;
        struct path path;
        int fd, error;
        int ro;

        audit_mq_open(oflag, mode, attr);

        if (IS_ERR(name = getname(u_name)))
                return PTR_ERR(name);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                goto out_putname;

        ro = mnt_want_write(mnt);       /* we'll drop it in any case */
        inode_lock(d_inode(root));
        path.dentry = lookup_one_len(name->name, root, strlen(name->name));
        if (IS_ERR(path.dentry)) {
                error = PTR_ERR(path.dentry);
                goto out_putfd;
        }
        path.mnt = mntget(mnt);
        error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
        if (!error) {
                struct file *file = dentry_open(&path, oflag, current_cred());
                if (!IS_ERR(file))
                        fd_install(fd, file);
                else
                        error = PTR_ERR(file);
        }
        path_put(&path);
out_putfd:
        if (error) {
                put_unused_fd(fd);
                fd = error;
        }
        inode_unlock(d_inode(root));
        if (!ro)
                mnt_drop_write(mnt);
out_putname:
        putname(name);
        return fd;
}
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
                struct mq_attr __user *, u_attr)
{
        struct mq_attr attr;
        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
                return -EFAULT;

        return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
        int err;
        struct filename *name;
        struct dentry *dentry;
        struct inode *inode = NULL;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
        struct vfsmount *mnt = ipc_ns->mq_mnt;

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        audit_inode_parent_hidden(name, mnt->mnt_root);
        err = mnt_want_write(mnt);
        if (err)
                goto out_name;
        inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
        dentry = lookup_one_len(name->name, mnt->mnt_root,
                                strlen(name->name));
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock;
        }

        inode = d_inode(dentry);
        if (!inode) {
                err = -ENOENT;
        } else {
                ihold(inode);
                err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
        }
        dput(dentry);

out_unlock:
        inode_unlock(d_inode(mnt->mnt_root));
        if (inode)
                iput(inode);
        mnt_drop_write(mnt);
out_name:
        putname(name);

        return err;
}
/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */
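/*
 * Rough mapping of those steps onto the helpers below (a sketch, not
 * additional behaviour):
 *
 *	receiver->msg = message;		// set pointer to message
 *	list_del(&this->list);			// leave the wait queue
 *	get_task_struct(this->task);		// pin the task,
 *	smp_store_release(&this->state,		// then mark STATE_READY
 *			  STATE_READY);
 *	wake_q_add_safe(wake_q, this->task);	// wakeup after unlock
 */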
static inline void __pipelined_op(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct ext_wait_queue *this)
{
        list_del(&this->list);
        get_task_struct(this->task);

        /* see MQ_BARRIER for purpose/pairing */
        smp_store_release(&this->state, STATE_READY);
        wake_q_add_safe(wake_q, this->task);
}
/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct msg_msg *message,
                                  struct ext_wait_queue *receiver)
{
        receiver->msg = message;
        __pipelined_op(wake_q, info, receiver);
}
/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (we know there is one free
 * slot for sure). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
                                     struct mqueue_inode_info *info)
{
        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

        if (!sender) {
                /* for poll */
                wake_up_interruptible(&info->wait_q);
                return;
        }
        if (msg_insert(sender->msg, info))
                return;

        __pipelined_op(wake_q, info, sender);
}
static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
                size_t msg_len, unsigned int msg_prio,
                struct timespec64 *ts)
{
        struct fd f;
        struct inode *inode;
        struct ext_wait_queue wait;
        struct ext_wait_queue *receiver;
        struct msg_msg *msg_ptr;
        struct mqueue_inode_info *info;
        ktime_t expires, *timeout = NULL;
        struct posix_msg_tree_node *new_leaf = NULL;
        int ret = 0;
        DEFINE_WAKE_Q(wake_q);

        if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
                return -EINVAL;

        if (ts) {
                expires = timespec64_to_ktime(*ts);
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

        f = fdget(mqdes);
        if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_file(f.file);

        if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
                ret = -EBADF;
                goto out_fput;
        }

        if (unlikely(msg_len > info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /* First try to allocate memory, before doing anything with
         * existing queues. */
        msg_ptr = load_msg(u_msg_ptr, msg_len);
        if (IS_ERR(msg_ptr)) {
                ret = PTR_ERR(msg_ptr);
                goto out_fput;
        }
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (f.file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;

                        /* memory barrier not required, we hold info->lock */
                        WRITE_ONCE(wait.state, STATE_NONE);
                        ret = wq_sleep(info, SEND, timeout, &wait);
                        /*
                         * wq_sleep must be called with info->lock held, and
                         * returns with the lock released
                         */
                        goto out_free;
                }
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(&wake_q, info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
                        ret = msg_insert(msg_ptr, info);
                        if (ret)
                                goto out_unlock;
                        __do_notify(info);
                }
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                current_time(inode);
        }
out_unlock:
        spin_unlock(&info->lock);
        wake_up_q(&wake_q);
out_free:
        if (ret)
                free_msg(msg_ptr);
out_fput:
        fdput(f);
out:
        return ret;
}
static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
                size_t msg_len, unsigned int __user *u_msg_prio,
                struct timespec64 *ts)
{
        ssize_t ret;
        struct msg_msg *msg_ptr;
        struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct ext_wait_queue wait;
        ktime_t expires, *timeout = NULL;
        struct posix_msg_tree_node *new_leaf = NULL;

        if (ts) {
                expires = timespec64_to_ktime(*ts);
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, 0, ts);

        f = fdget(mqdes);
        if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_file(f.file);

        if (unlikely(!(f.file->f_mode & FMODE_READ))) {
                ret = -EBADF;
                goto out_fput;
        }

        /* checks if buffer is big enough */
        if (unlikely(msg_len < info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == 0) {
                if (f.file->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
                        wait.task = current;

                        /* memory barrier not required, we hold info->lock */
                        WRITE_ONCE(wait.state, STATE_NONE);
                        ret = wq_sleep(info, RECV, timeout, &wait);
                        msg_ptr = wait.msg;
                }
        } else {
                DEFINE_WAKE_Q(wake_q);

                msg_ptr = msg_get(info);

                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                current_time(inode);

                /* There is now free space in queue. */
                pipelined_receive(&wake_q, info);
                spin_unlock(&info->lock);
                wake_up_q(&wake_q);
                ret = 0;
        }
        if (ret == 0) {
                ret = msg_ptr->m_ts;

                if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
                        store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
                        ret = -EFAULT;
                }
                free_msg(msg_ptr);
        }
out_fput:
        fdput(f);
out:
        return ret;
}
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int, msg_prio,
                const struct __kernel_timespec __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}
SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int __user *, u_msg_prio,
                const struct __kernel_timespec __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
/*
 * Notes: if the caller asks us to deregister (by passing a NULL pointer)
 * but is not currently the owner of the notification, the request is
 * silently discarded.  POSIX does not explicitly define this case.
 */
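/*
 * Userspace view (illustrative): a typical registration serviced here is
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(mqdes, &sev);		// register
 *	mq_notify(mqdes, NULL);		// deregister
 *
 * For SIGEV_THREAD the C library (not the application) passes a netlink
 * socket fd in sigev_signo and a cookie in sigev_value; that is the case
 * handled with alloc_skb()/netlink_attachskb() below.
 */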
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
        int ret;
        struct fd f;
        struct sock *sock;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct sk_buff *nc;

        audit_mq_notify(mqdes, notification);

        nc = NULL;
        sock = NULL;
        if (notification != NULL) {
                if (unlikely(notification->sigev_notify != SIGEV_NONE &&
                             notification->sigev_notify != SIGEV_SIGNAL &&
                             notification->sigev_notify != SIGEV_THREAD))
                        return -EINVAL;
                if (notification->sigev_notify == SIGEV_SIGNAL &&
                        !valid_signal(notification->sigev_signo)) {
                        return -EINVAL;
                }
                if (notification->sigev_notify == SIGEV_THREAD) {
                        long timeo;

                        /* create the notify skb */
                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
                        if (!nc)
                                return -ENOMEM;

                        if (copy_from_user(nc->data,
                                        notification->sigev_value.sival_ptr,
                                        NOTIFY_COOKIE_LEN)) {
                                ret = -EFAULT;
                                goto free_skb;
                        }

                        /* TODO: add a header? */
                        skb_put(nc, NOTIFY_COOKIE_LEN);
                        /* and attach it to the socket */
retry:
                        f = fdget(notification->sigev_signo);
                        if (!f.file) {
                                ret = -EBADF;
                                goto out;
                        }
                        sock = netlink_getsockbyfilp(f.file);
                        fdput(f);
                        if (IS_ERR(sock)) {
                                ret = PTR_ERR(sock);
                                sock = NULL;
                                goto free_skb;
                        }

                        timeo = MAX_SCHEDULE_TIMEOUT;
                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
                        if (ret == 1) {
                                sock = NULL;
                                goto retry;
                        }
                        if (ret)
                                return ret;
                }
        }

        f = fdget(mqdes);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);

        ret = 0;
        spin_lock(&info->lock);
        if (notification == NULL) {
                if (info->notify_owner == task_tgid(current)) {
                        remove_notification(info);
                        inode->i_atime = inode->i_ctime = current_time(inode);
                }
        } else if (info->notify_owner != NULL) {
                ret = -EBUSY;
        } else {
                switch (notification->sigev_notify) {
                case SIGEV_NONE:
                        info->notify.sigev_notify = SIGEV_NONE;
                        break;
                case SIGEV_THREAD:
                        info->notify_sock = sock;
                        info->notify_cookie = nc;
                        sock = NULL;
                        nc = NULL;
                        info->notify.sigev_notify = SIGEV_THREAD;
                        break;
                case SIGEV_SIGNAL:
                        info->notify.sigev_signo = notification->sigev_signo;
                        info->notify.sigev_value = notification->sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
                        break;
                }

                info->notify_owner = get_pid(task_tgid(current));
                info->notify_user_ns = get_user_ns(current_user_ns());
                inode->i_atime = inode->i_ctime = current_time(inode);
        }
        spin_unlock(&info->lock);
out_fput:
        fdput(f);
out:
        if (sock)
                netlink_detachskb(sock, nc);
        else
free_skb:
                kfree_skb(nc);

        return ret;
}
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                const struct sigevent __user *, u_notification)
{
        struct sigevent n, *p = NULL;
        if (u_notification) {
                if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
                        return -EFAULT;
                p = &n;
        }
        return do_mq_notify(mqdes, p);
}
static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
        struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;

        if (new && (new->mq_flags & (~O_NONBLOCK)))
                return -EINVAL;

        f = fdget(mqdes);
        if (!f.file)
                return -EBADF;

        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                fdput(f);
                return -EBADF;
        }

        inode = file_inode(f.file);
        info = MQUEUE_I(inode);

        spin_lock(&info->lock);

        if (old) {
                *old = info->attr;
                old->mq_flags = f.file->f_flags & O_NONBLOCK;
        }
        if (new) {
                audit_mq_getsetattr(mqdes, new);
                spin_lock(&f.file->f_lock);
                if (new->mq_flags & O_NONBLOCK)
                        f.file->f_flags |= O_NONBLOCK;
                else
                        f.file->f_flags &= ~O_NONBLOCK;
                spin_unlock(&f.file->f_lock);

                inode->i_atime = inode->i_ctime = current_time(inode);
        }

        spin_unlock(&info->lock);
        fdput(f);
        return 0;
}
SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                const struct mq_attr __user *, u_mqstat,
                struct mq_attr __user *, u_omqstat)
{
        int ret;
        struct mq_attr mqstat, omqstat;
        struct mq_attr *new = NULL, *old = NULL;

        if (u_mqstat) {
                new = &mqstat;
                if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
                        return -EFAULT;
        }
        if (u_omqstat)
                old = &omqstat;

        ret = do_mq_getsetattr(mqdes, new, old);
        if (ret || !old)
                return ret;

        if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
                return -EFAULT;
        return 0;
}
#ifdef CONFIG_COMPAT

struct compat_mq_attr {
        compat_long_t mq_flags;      /* message queue flags                 */
        compat_long_t mq_maxmsg;     /* maximum number of messages          */
        compat_long_t mq_msgsize;    /* maximum message size                */
        compat_long_t mq_curmsgs;    /* number of messages currently queued */
        compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};
static inline int get_compat_mq_attr(struct mq_attr *attr,
                        const struct compat_mq_attr __user *uattr)
{
        struct compat_mq_attr v;

        if (copy_from_user(&v, uattr, sizeof(*uattr)))
                return -EFAULT;

        memset(attr, 0, sizeof(*attr));
        attr->mq_flags = v.mq_flags;
        attr->mq_maxmsg = v.mq_maxmsg;
        attr->mq_msgsize = v.mq_msgsize;
        attr->mq_curmsgs = v.mq_curmsgs;
        return 0;
}
static inline int put_compat_mq_attr(const struct mq_attr *attr,
                        struct compat_mq_attr __user *uattr)
{
        struct compat_mq_attr v;

        memset(&v, 0, sizeof(v));
        v.mq_flags = attr->mq_flags;
        v.mq_maxmsg = attr->mq_maxmsg;
        v.mq_msgsize = attr->mq_msgsize;
        v.mq_curmsgs = attr->mq_curmsgs;
        if (copy_to_user(uattr, &v, sizeof(*uattr)))
                return -EFAULT;
        return 0;
}
COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
                       int, oflag, compat_mode_t, mode,
                       struct compat_mq_attr __user *, u_attr)
{
        struct mq_attr attr, *p = NULL;
        if (u_attr && oflag & O_CREAT) {
                p = &attr;
                if (get_compat_mq_attr(&attr, u_attr))
                        return -EFAULT;
        }
        return do_mq_open(u_name, oflag, mode, p);
}
COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                       const struct compat_sigevent __user *, u_notification)
{
        struct sigevent n, *p = NULL;
        if (u_notification) {
                if (get_compat_sigevent(&n, u_notification))
                        return -EFAULT;
                if (n.sigev_notify == SIGEV_THREAD)
                        n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
                p = &n;
        }
        return do_mq_notify(mqdes, p);
}
COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                       const struct compat_mq_attr __user *, u_mqstat,
                       struct compat_mq_attr __user *, u_omqstat)
{
        int ret;
        struct mq_attr mqstat, omqstat;
        struct mq_attr *new = NULL, *old = NULL;

        if (u_mqstat) {
                new = &mqstat;
                if (get_compat_mq_attr(new, u_mqstat))
                        return -EFAULT;
        }
        if (u_omqstat)
                old = &omqstat;

        ret = do_mq_getsetattr(mqdes, new, old);
        if (ret || !old)
                return ret;

        if (put_compat_mq_attr(old, u_omqstat))
                return -EFAULT;
        return 0;
}
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
                                   struct timespec64 *ts)
{
        if (get_old_timespec32(ts, p))
                return -EFAULT;
        if (!timespec64_valid(ts))
                return -EINVAL;
        return 0;
}
, mqd_t
, mqdes
,
1593 const char __user
*, u_msg_ptr
,
1594 unsigned int, msg_len
, unsigned int, msg_prio
,
1595 const struct old_timespec32 __user
*, u_abs_timeout
)
1597 struct timespec64 ts
, *p
= NULL
;
1598 if (u_abs_timeout
) {
1599 int res
= compat_prepare_timeout(u_abs_timeout
, &ts
);
1604 return do_mq_timedsend(mqdes
, u_msg_ptr
, msg_len
, msg_prio
, p
);
SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
                char __user *, u_msg_ptr,
                unsigned int, msg_len, unsigned int __user *, u_msg_prio,
                const struct old_timespec32 __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = compat_prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif
static const struct inode_operations mqueue_dir_inode_operations = {
        .lookup = simple_lookup,
        .create = mqueue_create,
        .unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
        .flush = mqueue_flush_file,
        .poll = mqueue_poll_file,
        .read = mqueue_read_file,
        .llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
        .alloc_inode = mqueue_alloc_inode,
        .free_inode = mqueue_free_inode,
        .evict_inode = mqueue_evict_inode,
        .statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
        .free           = mqueue_fs_context_free,
        .get_tree       = mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
        .name                   = "mqueue",
        .init_fs_context        = mqueue_init_fs_context,
        .kill_sb                = kill_litter_super,
        .fs_flags               = FS_USERNS_MOUNT,
};
int mq_init_ns(struct ipc_namespace *ns)
{
        struct vfsmount *m;

        ns->mq_queues_count  = 0;
        ns->mq_queues_max    = DFLT_QUEUESMAX;
        ns->mq_msg_max       = DFLT_MSGMAX;
        ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
        ns->mq_msg_default   = DFLT_MSG;
        ns->mq_msgsize_default  = DFLT_MSGSIZE;

        m = mq_create_mount(ns);
        if (IS_ERR(m))
                return PTR_ERR(m);
        ns->mq_mnt = m;
        return 0;
}
void mq_clear_sbinfo(struct ipc_namespace *ns)
{
        ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}
void mq_put_mnt(struct ipc_namespace *ns)
{
        kern_unmount(ns->mq_mnt);
}
static int __init init_mqueue_fs(void)
{
        int error;

        mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
                                sizeof(struct mqueue_inode_info), 0,
                                SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
        if (mqueue_inode_cachep == NULL)
                return -ENOMEM;

        /* ignore failures - they are not fatal */
        mq_sysctl_table = mq_register_sysctl_table();

        error = register_filesystem(&mqueue_fs_type);
        if (error)
                goto out_sysctl;

        spin_lock_init(&mq_lock);

        error = mq_init_ns(&init_ipc_ns);
        if (error)
                goto out_filesystem;

        return 0;

out_filesystem:
        unregister_filesystem(&mqueue_fs_type);
out_sysctl:
        if (mq_sysctl_table)
                unregister_sysctl_table(mq_sysctl_table);
        kmem_cache_destroy(mqueue_inode_cachep);
        return error;
}

device_initcall(init_mqueue_fs);