/*
	kmod, the new module loader (replaces kerneld)

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.

	Limit the concurrent number of kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>
extern int max_threads;

static struct workqueue_struct *khelper_wq;

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);
#ifdef CONFIG_MODULES

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
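/*
 * For example (an assumption about the standard sysctl layout, not defined in
 * this file), the path can typically be changed from user space through the
 * "kernel.modprobe" sysctl:
 *
 *	echo "/sbin/modprobe" > /proc/sys/kernel/modprobe
 */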
static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]); /* check call_modprobe() */
	kfree(info->argv);
}
static int call_modprobe(char *module_name, int wait)
{
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
	if (!argv)
		goto out;

	module_name = kstrdup(module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* check free_modprobe_argv() */
	argv[4] = NULL;

	return call_usermodehelper_fns(modprobe_path, argv, envp,
				       wait | UMH_KILLABLE,
				       NULL, free_modprobe_argv, NULL);
free_argv:
	kfree(argv);
out:
	return -ENOMEM;
}
/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available, and not blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop.  Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked.  That would mean accessing the internals of the
	 * process tables to get the command line, proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 *
	 * "trace the ppid" is simple, but will fail if someone's
	 * parent exits.  I think this is as good as it gets. --RR
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg < 5) {
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
			kmod_loop_msg++;
		}
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(__request_module);
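/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants a protocol module loaded on demand would normally go through the
 * request_module() wrapper from linux/kmod.h. The module name pattern and
 * the my_proto_is_registered() check below are made-up examples.
 *
 *	if (request_module("my-proto-%d", proto_id) == 0 &&
 *	    my_proto_is_registered(proto_id))
 *		pr_info("my-proto-%d is now available\n", proto_id);
 *
 * Note that the caller re-checks that the requested service is really
 * registered instead of trusting the return value alone, as the comment
 * above __request_module() requires.
 */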
#endif /* CONFIG_MODULES */
/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/*
	 * Our parent is keventd, which runs with elevated scheduling priority.
	 * Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto fail;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto fail;
		}
	}

	commit_creds(new);

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	do_exit(0);
}
void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}
EXPORT_SYMBOL(call_usermodehelper_freeinfo);
static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}
/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/* If SIGCLD is ignored sys_wait4 won't populate the status. */
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
	spin_unlock_irq(&current->sighand->siglock);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to their
		 * having an mm_segment_t which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and the
		 * real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	umh_complete(sub_info);
	return 0;
}
/* This is run by khelper thread  */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	int wait = sub_info->wait & ~UMH_KILLABLE;
	pid_t pid;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully. We need the data structures to stay around
	 * until that is done.  */
	if (wait == UMH_WAIT_PROC)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else
		pid = kernel_thread(____call_usermodehelper, sub_info,
				    CLONE_VFORK | SIGCHLD);

	switch (wait) {
	case UMH_NO_WAIT:
		call_usermodehelper_freeinfo(sub_info);
		break;

	case UMH_WAIT_PROC:
		if (pid > 0)
			break;
		/* FALLTHROUGH */
	case UMH_WAIT_EXEC:
		if (pid < 0)
			sub_info->retval = pid;
		umh_complete(sub_info);
	}
}
/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
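/*
 * Usage sketch (illustrative only): a caller that must not race with the
 * helpers being disabled, e.g. before queueing a helper on a suspend-sensitive
 * path, would typically bracket its work like this and drop the read lock as
 * soon as the helper has been started:
 *
 *	ret = usermodehelper_read_trylock();
 *	if (ret)
 *		return ret;	(helpers are disabled, e.g. during suspend)
 *	... start the helper ...
 *	usermodehelper_read_unlock();
 */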
/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wakeup tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}
/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
				    atomic_read(&running_helpers) == 0,
				    RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
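/*
 * For reference (an assumption about the surrounding kernel, not shown in
 * this file): linux/kmod.h is expected to provide thin wrappers such as
 * usermodehelper_disable(), calling __usermodehelper_disable(UMH_DISABLED),
 * and usermodehelper_enable(), calling
 * __usermodehelper_set_disable_depth(UMH_ENABLED), for callers that do not
 * care about intermediate disable depths.
 */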
static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic_inc();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}
/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 */
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
						  char **envp, gfp_t gfp_mask)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, __call_usermodehelper);
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;
out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
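/*
 * Usage sketch (illustrative only): building the argument and environment
 * vectors for a helper. The path and arguments below are made up.
 *
 *	static char *my_argv[] = { "/sbin/my-helper", "--oneshot", NULL };
 *	static char *my_envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin", NULL };
 *	struct subprocess_info *info;
 *
 *	info = call_usermodehelper_setup("/sbin/my-helper", my_argv, my_envp,
 *					 GFP_KERNEL);
 *	if (!info)
 *		return -ENOMEM;
 */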
/**
 * call_usermodehelper_setfns - set a cleanup/init function
 * @info: a subprocess_info returned by call_usermodehelper_setup
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is about
 * to be freed.  This can be used for freeing the argv and envp.  The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
void call_usermodehelper_setfns(struct subprocess_info *info,
		    int (*init)(struct subprocess_info *info, struct cred *new),
		    void (*cleanup)(struct subprocess_info *info),
		    void *data)
{
	info->cleanup = cleanup;
	info->init = init;
	info->data = data;
}
EXPORT_SYMBOL(call_usermodehelper_setfns);
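/*
 * Usage sketch (illustrative only), continuing the example above: wiring up
 * a cleanup callback for the case where argv was kmalloc'ed, as
 * call_modprobe() does earlier in this file. The helper name is made up.
 *
 *	static void my_helper_cleanup(struct subprocess_info *info)
 *	{
 *		kfree(info->argv);
 *	}
 *
 *	call_usermodehelper_setfns(info, NULL, my_helper_cleanup, NULL);
 */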
/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        When -1 don't wait at all, but you get no useful error back when
 *        the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (ie. it runs with full root capabilities).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	helper_lock();
	if (sub_info->path[0] == '\0')
		goto out;

	if (!khelper_wq || usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
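/*
 * Usage sketch (illustrative only), completing the example: running the
 * helper and waiting for it to exit. UMH_WAIT_PROC collects the exit code;
 * UMH_WAIT_EXEC would only wait until the exec has been attempted.
 *
 *	int ret = call_usermodehelper_exec(info, UMH_WAIT_PROC);
 *	if (ret)
 *		pr_warn("my-helper failed: %d\n", ret);
 *
 * The subprocess_info is always consumed (freed either here or by the helper
 * thread), so the caller must not touch info afterwards.
 */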
static int proc_cap_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace.  Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}
struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};
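/*
 * Note (an assumption about the wider kernel, not shown in this file): this
 * table is expected to be registered under the "kernel" sysctl directory, so
 * the two capability masks normally appear as
 * /proc/sys/kernel/usermodehelper/bset and
 * /proc/sys/kernel/usermodehelper/inheritable. Because proc_cap_handler()
 * only ever intersects with the current value, writes can drop capabilities
 * but never add them back.
 */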
void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");
	BUG_ON(!khelper_wq);
}