/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"
struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};
#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};
struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};
static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;
static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}
static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);

	return ns;
}
/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}
static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache) {
		kfree(leaf);
	} else {
		info->node_cache = leaf;
	}
}
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right.  On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list))
			msg_tree_erase(leaf, info);
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}
static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->s_fs_info = ctx->ipc_ns;
	return vfs_get_super(fc, vfs_get_keyed_super, mqueue_fill_super);
}
static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}
static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}
static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}
static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}
static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}
static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}
static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}
static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}

	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
			 umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
/*
 * This is the routine for reads from a queue file.  To avoid doing some
 * sort of mq_receive here, we allow reading only the queue size and the
 * notification info (the only values that are interesting from the user's
 * point of view and aren't accessible through standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}
static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == -ETIME) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}
/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}
/*
 * This function exists only to keep sys_mq_timedsend from growing too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Invoked when a process is registered for notification, no process
	 * is waiting synchronously for a message, AND the state of the queue
	 * changed from empty to not empty.  Here we are sure that no one is
	 * waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct kernel_siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */
			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}
static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}
static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}
static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				  mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(d_inode(dentry), acc);
}
static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}
/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */
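/*
 * Timeline sketch of the handoff (illustrative), a sender servicing a
 * receiver that is woken early by a signal:
 *
 *	sender (holds info->lock)	receiver (in wq_sleep())
 *	-------------------------	------------------------
 *	receiver->msg = message;
 *	wake_q_add(wake_q, task);	if (ewp->state == STATE_READY)
 *	receiver->state = STATE_READY;		return;		(lock-free)
 *	spin_unlock(&info->lock);	else spin_lock(&info->lock),
 *	wake_up_q(&wake_q);		     re-check state under the lock
 */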
/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add such
	 * that we can ensure that updating receiver->state is the last
	 * write operation: As once set, the receiver can continue,
	 * and if we don't have the reference count from the wake_q,
	 * yet, at that point we can later have a use-after-free
	 * condition and bogus wakeup.
	 */
	receiver->state = STATE_READY;
}
/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and put it into the queue (we have one free place for
 * sure). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);
	wake_q_add(wake_q, sender->task);
	sender->state = STATE_READY;
}
static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}
static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}
SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
/*
 * Note: if the caller asks us to deregister (by passing a NULL pointer)
 * but is not the current owner of the notification, the request is
 * silently discarded.  This case is not explicitly defined by POSIX.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}
static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}
SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}
#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags */
	compat_long_t mq_maxmsg;     /* maximum number of messages */
	compat_long_t mq_msgsize;    /* maximum message size */
	compat_long_t mq_curmsgs;    /* number of messages currently queued */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				   struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif
static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free = mqueue_fs_context_free,
	.get_tree = mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.init_fs_context = mqueue_init_fs_context,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};
int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}
static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}
);