/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif
/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */
#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
#else
static bool sig_enforce = false;

static int param_set_bool_enable_only(const char *val,
				      const struct kernel_param *kp)
{
	int err;
	bool test;
	struct kernel_param dummy_kp = *kp;

	dummy_kp.arg = &test;

	err = param_set_bool(val, &dummy_kp);
	if (err)
		return err;

	/* Don't let them unset it once it's set! */
	if (!test && sig_enforce)
		return -EROFS;

	if (test)
		sig_enforce = true;
	return 0;
}

static const struct kernel_param_ops param_ops_bool_enable_only = {
	.flags = KERNEL_PARAM_OPS_FL_NOARG,
	.set = param_set_bool_enable_only,
	.get = param_get_bool,
};
#define param_check_bool_enable_only param_check_bool

module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */
#endif /* CONFIG_MODULE_SIG */
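
/*
 * Usage sketch (commentary, not part of the original file): signature
 * enforcement can be turned on at boot with "module.sig_enforce=1" on the
 * kernel command line, or later through the 0644 sysfs parameter:
 *
 *	echo 1 > /sys/module/module/parameters/sig_enforce
 *
 * The enable-only param ops above make this a one-way switch: a later
 * write of 0 fails with -EROFS.
 */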
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Bounds of module allocation, for speeding __module_address.
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
#ifdef CONFIG_KALLSYMS
	unsigned long mod_kallsyms_init_off;
#endif
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};
/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}
static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);
/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
			 struct module *owner,
			 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}
/* Find a symbol and return it, along with, (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
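
/*
 * Usage sketch (illustrative, not part of the original file): as the
 * comment above says, callers must hold module_mutex or disable
 * preemption around the lookup so the owner cannot vanish underneath
 * them, e.g.:
 *
 *	struct module *owner;
 *	const unsigned long *crc;
 *	const struct kernel_symbol *sym;
 *
 *	preempt_disable();
 *	sym = find_symbol("printk", &owner, &crc, true, false);
 *	preempt_enable();
 */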
/* Search for module by name: must hold module_mutex. */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,  \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)              \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                 \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                   \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

static char last_unloaded_module[MODULE_NAME_LEN+1];
#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count by kmodule loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}
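
/*
 * A note on the scheme (commentary, not part of the original file): while
 * a module is live its refcnt never drops below MODULE_REF_BASE, so the
 * number of user references is refcnt - MODULE_REF_BASE.  During init the
 * count is MODULE_REF_BASE + 1; the loader drops that extra reference with
 * module_put() once initialization finishes, and unload succeeds only if
 * try_release_module_ref() below can strip the base reference itself.
 */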
/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		pr_warn("%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}
/* Module a uses b: caller needs module_mutex() */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
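
/*
 * Aside (commentary, not part of the original file): delete_module(2)
 * reuses open(2) flag bits for its flags argument; userspace requests a
 * forced unload by setting O_TRUNC, which is what "rmmod --force" does.
 * try_force_unload() above only honours that bit when the kernel was
 * built with CONFIG_MODULE_FORCE_UNLOAD, and taints the kernel if it does.
 */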
/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}
/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod:	the module we're checking
 *
 * Returns:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %i ", module_refcount(mod));

	/*
	 * Always include a trailing , so userspace can differentiate
	 * between this and the old multi-field proc format.
	 */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_puts(m, "[permanent],");
	}

	if (!printed_something)
		seq_puts(m, "-");
}
void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/*
	 * Even though we hold a reference on the module; we still need to
	 * disable preemption in order to safely traverse the data structure.
	 */
	preempt_disable();
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);
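
/*
 * Typical usage pattern (illustrative, not part of the original file):
 * code that must keep a module from unloading while calling into it pairs
 * a successful try_module_get() with a later module_put():
 *
 *	if (!try_module_get(owner))
 *		return -ENODEV;
 *	...call into the module...
 *	module_put(owner);
 *
 * try_module_get() fails once the module is GOING, so callers must always
 * check the return value.
 */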
#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */
static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;

	if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
		buf[l++] = 'P';
	if (mod->taints & (1 << TAINT_OOT_MODULE))
		buf[l++] = 'O';
	if (mod->taints & (1 << TAINT_FORCED_MODULE))
		buf[l++] = 'F';
	if (mod->taints & (1 << TAINT_CRAP))
		buf[l++] = 'C';
	if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
		buf[l++] = 'E';
	/*
	 * TAINT_FORCED_RMMOD: could be added.
	 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
	 * apply to modules.
	 */
	return l;
}
static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	enum kobject_action action;

	if (kobject_action_type(buffer, count, &action) == 0)
		kobject_uevent(&mk->kobj, action);
	return count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);
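
/*
 * For example (commentary, not part of the original file), userspace can
 * replay a module's uevent through this write-only attribute, which udev
 * relies on during coldplug:
 *
 *	echo add > /sys/module/<name>/uevent
 */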
static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->core_size);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->init_size);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};
static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}
#ifdef CONFIG_MODVERSIONS
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	if (crc_owner == NULL)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}

static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc,
			 const struct module *crc_owner)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
			return 1;
		pr_debug("Found checksum %lX vs module %lX\n",
			 maybe_relocated(*crc, crc_owner), versions[i].crc);
		goto bad_version;
	}

	pr_warn("%s: no symbol version for %s\n", mod->name, symname);
	return 0;

bad_version:
	pr_warn("%s: disagrees about version of symbol %s\n",
		mod->name, symname);
	return 0;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/* Since this should be found in kernel (which can't be removed),
	 * no locking is necessary. */
	if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
			 &crc, true, false))
		BUG();
	return check_version(sechdrs, versindex,
			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
			     NULL);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
#else
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc,
				const struct module *crc_owner)
{
	return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */
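
/*
 * Illustrative note (commentary, not part of the original file): a
 * vermagic string looks roughly like "4.0.5 SMP mod_unload modversions ".
 * same_magic() skips past the leading release number (up to the first
 * space) when the module carries symbol CRCs, since the per-symbol CRC
 * check then subsumes the exact version match; the remaining feature
 * flags must still agree exactly.
 */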
/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}
/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr {
	struct module_attribute mattr;
	char *name;
	unsigned long address;
};

struct module_sect_attrs {
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};

static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module_kobject *mk, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].name);
	kfree(sect_attrs);
}

static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
					GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
out:
	free_sect_attrs(sect_attrs);
}

static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}
/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[0];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}

static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

out:
	free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}
#else

static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */
static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}
static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	struct module_attribute *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
					(ARRAY_SIZE(modinfo_attrs) + 1)),
					GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
		if (!attr->test ||
		    (attr->test && attr->test(mod))) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj,
					&temp_attr->attr);
			++temp_attr;
		}
	}
	return error;
}

static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}
static void mod_kobject_put(struct module *mod)
{
	DECLARE_COMPLETION_ONSTACK(c);
	mod->mkobj.kobj_completion = &c;
	kobject_put(&mod->mkobj.kobj);
	wait_for_completion(&c);
}

static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		pr_err("%s: module sysfs not initialized\n", mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		pr_err("%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		mod_kobject_put(mod);

	/* delay uevent until full sysfs population */
out:
	return err;
}
static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	mod_kobject_put(mod);
out:
	return err;
}

static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	mod_kobject_put(mod);
}
#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 */
void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
{
	unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
	unsigned long end_pfn = PFN_DOWN((unsigned long)end);

	if (end_pfn > begin_pfn)
		set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
}

static void set_section_ro_nx(void *base,
			unsigned long text_size,
			unsigned long ro_size,
			unsigned long total_size)
{
	/* begin and end PFNs of the current subsection */
	unsigned long begin_pfn;
	unsigned long end_pfn;

	/*
	 * Set RO for module text and RO-data:
	 * - Always protect first page.
	 * - Do not protect last partial page.
	 */
	if (ro_size > 0)
		set_page_attributes(base, base + ro_size, set_memory_ro);

	/*
	 * Set NX permissions for module data:
	 * - Do not protect first partial page.
	 * - Always protect last page.
	 */
	if (total_size > text_size) {
		begin_pfn = PFN_UP((unsigned long)base + text_size);
		end_pfn = PFN_UP((unsigned long)base + total_size);
		if (end_pfn > begin_pfn)
			set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
	}
}

static void unset_module_core_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_core + mod->core_text_size,
		mod->module_core + mod->core_size,
		set_memory_x);
	set_page_attributes(mod->module_core,
		mod->module_core + mod->core_ro_size,
		set_memory_rw);
}

static void unset_module_init_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_init + mod->init_text_size,
		mod->module_init + mod->init_size,
		set_memory_x);
	set_page_attributes(mod->module_init,
		mod->module_init + mod->init_ro_size,
		set_memory_rw);
}

/* Iterate through all modules and set each module's text as RW */
void set_all_modules_text_rw(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_rw);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_rw);
		}
	}
	mutex_unlock(&module_mutex);
}

/* Iterate through all modules and set each module's text as RO */
void set_all_modules_text_ro(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_ro);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_ro);
		}
	}
	mutex_unlock(&module_mutex);
}
#else
static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif
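
/*
 * Layout note (commentary, not part of the original file): this protection
 * depends on layout_sections() below page-aligning the text/RO/data
 * boundaries via debug_align() when CONFIG_DEBUG_SET_MODULE_RONX=y, so a
 * module's core region ends up as:
 *
 *	[ text (RO+X) | ro-data (RO+NX) | rw-data (RW+NX) ]
 *
 * with each boundary falling on a page boundary.
 */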
void __weak module_memfree(void *module_region)
{
	vfree(module_region);
}

void __weak module_arch_cleanup(struct module *mod)
{
}

void __weak module_arch_freeing_init(struct module *mod)
{
}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	mod_sysfs_teardown(mod);

	/* We leave it in list to prevent duplicate loads, but make sure
	 * that no one uses it while it's being deconstructed. */
	mutex_lock(&module_mutex);
	mod->state = MODULE_STATE_UNFORMED;
	mutex_unlock(&module_mutex);

	/* Remove dynamic debug info */
	ddebug_remove_module(mod->name);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	/* Now we can delete it from the lists */
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	/* Remove this module from bug list, this uses list_del_rcu */
	module_bug_cleanup(mod);
	/* Wait for RCU synchronizing before releasing mod->list and buglist. */
	synchronize_rcu();
	mutex_unlock(&module_mutex);

	/* This may be NULL, but that's OK */
	unset_module_init_ro_nx(mod);
	module_arch_freeing_init(mod);
	module_memfree(mod->module_init);
	kfree(mod->args);
	percpu_modfree(mod);

	/* Free lock-classes; relies on the preceding sync_rcu(). */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* Finally, free the core (containing the module structure) */
	unset_module_core_ro_nx(mod);
	module_memfree(mod->module_core);

#ifdef CONFIG_MPU
	update_protections(current->mm);
#endif
}
void *__symbol_get(const char *symbol)
{
	struct module *owner;
	const struct kernel_symbol *sym;

	preempt_disable();
	sym = find_symbol(symbol, &owner, NULL, true, true);
	if (sym && strong_try_module_get(owner))
		sym = NULL;
	preempt_enable();

	return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_export_symbols(struct module *mod)
{
	unsigned int i;
	struct module *owner;
	const struct kernel_symbol *s;
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ mod->unused_syms, mod->num_unused_syms },
		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			if (find_symbol(s->name, &owner, NULL, true, false)) {
				pr_err("%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, s->name, module_name(owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}
/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	unsigned int i;
	int ret = 0;
	const struct kernel_symbol *ksym;

	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Ignore common symbols */
			if (!strncmp(name, "__gnu_lto", 9))
				break;

			/* We compiled with -fno-common.  These are not
			   supposed to happen.  */
			pr_debug("Common symbol: %s\n", name);
			pr_warn("%s: please compile with -fno-common\n",
				mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			pr_debug("Absolute symbol: 0x%08lx\n",
				 (long)sym[i].st_value);
			break;

		case SHN_UNDEF:
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved.  */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = ksym->value;
				break;
			}

			/* Ok if weak.  */
			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
				break;

			pr_warn("%s: Unknown symbol %s (err %li)\n",
				mod->name, name, PTR_ERR(ksym));
			ret = PTR_ERR(ksym) ?: -ENOENT;
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
			else
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;
			break;
		}
	}

	return ret;
}
static int apply_relocations(struct module *mod, const struct load_info *info)
{
	unsigned int i;
	int err = 0;

	/* Now do relocations. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		unsigned int infosec = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (infosec >= info->hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
			continue;

		if (info->sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(info->sechdrs, info->strtab,
						 info->index.sym, i, mod);
		if (err < 0)
			break;
	}
	return err;
}
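
/*
 * Aside (commentary, not part of the original file): whether an
 * architecture emits SHT_REL or SHT_RELA sections is an ABI property;
 * e.g. 32-bit x86 uses REL (implicit addends stored at the relocated
 * location) while x86-64 uses RELA (explicit addends in the relocation
 * entry).  Each arch supplies the matching apply_relocate() or
 * apply_relocate_add() implementation.
 */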
/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}

/* Update size with this section: return offset. */
static long get_offset(struct module *mod, unsigned int *size,
		       Elf_Shdr *sechdr, unsigned int section)
{
	long ret;

	*size += arch_mod_section_prepend(mod, section);
	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
	*size = ret + sechdr->sh_size;
	return ret;
}
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data.  Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
static void layout_sections(struct module *mod, struct load_info *info)
{
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	unsigned int m, i;

	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	pr_debug("Core section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || strstarts(sname, ".init"))
				continue;
			s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->core_size = debug_align(mod->core_size);
			mod->core_text_size = mod->core_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->core_size = debug_align(mod->core_size);
			mod->core_ro_size = mod->core_size;
			break;
		case 3: /* whole core */
			mod->core_size = debug_align(mod->core_size);
			break;
		}
	}

	pr_debug("Init section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || !strstarts(sname, ".init"))
				continue;
			s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
					 | INIT_OFFSET_MASK);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->init_size = debug_align(mod->init_size);
			mod->init_text_size = mod->init_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->init_size = debug_align(mod->init_size);
			mod->init_ro_size = mod->init_size;
			break;
		case 3: /* whole init */
			mod->init_size = debug_align(mod->init_size);
			break;
		}
	}
}
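
/*
 * Worked example (commentary, not part of the original file): for a module
 * with .text, .rodata and .data sections, the mask passes above pick them
 * up in that order, so after layout_sections():
 *
 *	.text    at offset 0                (pass 0: SHF_EXECINSTR)
 *	.rodata  after core_text_size       (pass 1: SHF_ALLOC, !SHF_WRITE)
 *	.data    after core_ro_size         (pass 2: SHF_WRITE)
 *
 * and core_size covers the whole region.  Init sections get the same
 * treatment with INIT_OFFSET_MASK or'd into sh_entsize so the loader can
 * tell the two regions apart.
 */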
static void set_license(struct module *mod, const char *license)
{
	if (!license)
		license = "unspecified";

	if (!license_is_gpl_compatible(license)) {
		if (!test_taint(TAINT_PROPRIETARY_MODULE))
			pr_warn("%s: module license '%s' taints kernel.\n",
				mod->name, license);
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);
	}
}

/* Parse tag=value strings from .modinfo section */
static char *next_string(char *string, unsigned long *secsize)
{
	/* Skip non-zero chars */
	while (string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Skip any zero padding. */
	while (!string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}
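
/*
 * Illustrative .modinfo contents (invented for the example): the
 * section is a run of NUL-terminated "tag=value" strings, e.g.
 *
 *   "license=GPL\0author=Jane Doe\0intree=Y\0"
 *
 * next_string() advances from one string to the start of the next and
 * returns NULL once it would run off the end of the section.
 */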

static char *get_modinfo(struct load_info *info, const char *tag)
{
	char *p;
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}
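
/*
 * Illustrative usage: with the .modinfo contents sketched above,
 * get_modinfo(info, "license") returns a pointer to "GPL", while a
 * missing tag such as "staging" yields NULL.
 */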

static void setup_modinfo(struct module *mod, struct load_info *info)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->setup)
			attr->setup(mod, get_modinfo(info, attr->attr.name));
	}
}

static void free_modinfo(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->free)
			attr->free(mod);
	}
}

#ifdef CONFIG_KALLSYMS

/* lookup symbol in given range of kernel_symbols */
static const struct kernel_symbol *lookup_symbol(const char *name,
	const struct kernel_symbol *start,
	const struct kernel_symbol *stop)
{
	return bsearch(name, start, stop - start,
			sizeof(struct kernel_symbol), cmp_name);
}

static int is_exported(const char *name, unsigned long value,
		       const struct module *mod)
{
	const struct kernel_symbol *ks;
	if (!mod)
		ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
	else
		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
	return ks != NULL && ks->value == value;
}

/* As per nm */
static char elf_type(const Elf_Sym *sym, const struct load_info *info)
{
	const Elf_Shdr *sechdrs = info->sechdrs;

	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
			return 'v';
		else
			return 'w';
	}
	if (sym->st_shndx == SHN_UNDEF)
		return 'U';
	if (sym->st_shndx == SHN_ABS)
		return 'a';
	if (sym->st_shndx >= SHN_LORESERVE)
		return '?';
	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
		return 't';
	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
			return 'r';
		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 'g';
		else
			return 'd';
	}
	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 's';
		else
			return 'b';
	}
	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
		      ".debug")) {
		return 'n';
	}
	return '?';
}
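
/*
 * For illustration, these match the codes nm(1) prints: a function in
 * .text reports 't', a constant in .rodata 'r', an initialized
 * variable in .data 'd', a zero-initialized one in .bss 'b', and an
 * undefined (imported) symbol 'U'.
 */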

static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
			   unsigned int shnum)
{
	const Elf_Shdr *sec;

	if (src->st_shndx == SHN_UNDEF
	    || src->st_shndx >= shnum
	    || !src->st_name)
		return false;

	sec = sechdrs + src->st_shndx;
	if (!(sec->sh_flags & SHF_ALLOC)
#ifndef CONFIG_KALLSYMS_ALL
	    || !(sec->sh_flags & SHF_EXECINSTR)
#endif
	    || (sec->sh_entsize & INIT_OFFSET_MASK))
		return false;

	return true;
}

/*
 * We only allocate and copy the strings needed by the parts of symtab
 * we keep.  This is simple, but has the effect of making multiple
 * copies of duplicates.  We could be more sophisticated, see
 * linux-kernel thread starting with
 * <73defb5e4bca04a6431392cc341112b1@localhost>.
 */
static void layout_symtab(struct module *mod, struct load_info *info)
{
	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
	Elf_Shdr *strsect = info->sechdrs + info->index.str;
	const Elf_Sym *src;
	unsigned int i, nsrc, ndst, strtab_size = 0;

	/* Put symbol section at end of init part of module. */
	symsect->sh_flags |= SHF_ALLOC;
	symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
					 info->index.sym) | INIT_OFFSET_MASK;
	pr_debug("\t%s\n", info->secstrings + symsect->sh_name);

	src = (void *)info->hdr + symsect->sh_offset;
	nsrc = symsect->sh_size / sizeof(*src);

	/* Compute total space required for the core symbols' strtab. */
	for (ndst = i = 0; i < nsrc; i++) {
		if (i == 0 ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
			ndst++;
		}
	}

	/* Append room for core symbols at end of core part. */
	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
	info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
	mod->core_size += strtab_size;
	mod->core_size = debug_align(mod->core_size);

	/* Put string table section at end of init part of module. */
	strsect->sh_flags |= SHF_ALLOC;
	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
					 info->index.str) | INIT_OFFSET_MASK;
	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);

	/* We'll tack temporary mod_kallsyms on the end. */
	mod->init_size = ALIGN(mod->init_size,
			       __alignof__(struct mod_kallsyms));
	info->mod_kallsyms_init_off = mod->init_size;
	mod->init_size += sizeof(struct mod_kallsyms);
	mod->init_size = debug_align(mod->init_size);
}
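
/*
 * Sizing example (illustrative): ignoring the always-kept null entry,
 * a module whose core symbols are "foo", "bar2" and "x" reserves
 * 3 * sizeof(Elf_Sym) for the copied entries plus 4 + 5 + 2 = 11
 * bytes of string table at the end of the core area.
 */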

/*
 * We use the full symtab and strtab which layout_symtab arranged to
 * be appended to the init section.  Later we switch to the cut-down
 * core-only ones.
 */
static void add_kallsyms(struct module *mod, const struct load_info *info)
{
	unsigned int i, ndst;
	const Elf_Sym *src;
	Elf_Sym *dst;
	char *s;
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];

	/* Set up to point into init section. */
	mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off;

	mod->kallsyms->symtab = (void *)symsec->sh_addr;
	mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
	/* Make sure we get permanent strtab: don't use info->strtab. */
	mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;

	/* Set types up while we still have access to sections. */
	for (i = 0; i < mod->kallsyms->num_symtab; i++)
		mod->kallsyms->symtab[i].st_info
			= elf_type(&mod->kallsyms->symtab[i], info);

	/* Now populate the cut down core kallsyms for after init. */
	mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs;
	mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs;
	src = mod->kallsyms->symtab;
	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
		if (i == 0 ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
			dst[ndst] = src[i];
			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
				     KSYM_NAME_LEN) + 1;
		}
	}
	mod->core_kallsyms.num_symtab = ndst;
}
#else
static inline void layout_symtab(struct module *mod, struct load_info *info)
{
}

static void add_kallsyms(struct module *mod, const struct load_info *info)
{
}
#endif /* CONFIG_KALLSYMS */

static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
{
	if (!debug)
		return;
#ifdef CONFIG_DYNAMIC_DEBUG
	if (ddebug_add_module(debug, num, debug->modname))
		pr_err("dynamic debug error adding module: %s\n",
			debug->modname);
#endif
}

static void dynamic_debug_remove(struct _ddebug *debug)
{
	if (debug)
		ddebug_remove_module(debug->modname);
}

void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}

static void *module_alloc_update_bounds(unsigned long size)
{
	void *ret = module_alloc(size);

	if (ret) {
		mutex_lock(&module_mutex);
		/* Update module bounds. */
		if ((unsigned long)ret < module_addr_min)
			module_addr_min = (unsigned long)ret;
		if ((unsigned long)ret + size > module_addr_max)
			module_addr_max = (unsigned long)ret + size;
		mutex_unlock(&module_mutex);
	}
	return ret;
}

#ifdef CONFIG_DEBUG_KMEMLEAK
static void kmemleak_load_module(const struct module *mod,
				 const struct load_info *info)
{
	unsigned int i;

	/* only scan the sections containing data */
	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);

	for (i = 1; i < info->hdr->e_shnum; i++) {
		/* Scan all writable sections that are not executable */
		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
		    !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
		    (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
				   info->sechdrs[i].sh_size, GFP_KERNEL);
	}
}
#else
static inline void kmemleak_load_module(const struct module *mod,
					const struct load_info *info)
{
}
#endif

#ifdef CONFIG_MODULE_SIG
static int module_sig_check(struct load_info *info, int flags)
{
	int err = -ENOKEY;
	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
	const void *mod = info->hdr;

	/*
	 * Require flags == 0, as a module with version information
	 * removed is no longer the module that was signed
	 */
	if (flags == 0 &&
	    info->len > markerlen &&
	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
		/* We truncate the module to discard the signature */
		info->len -= markerlen;
		err = mod_verify_sig(mod, &info->len);
	}

	if (!err) {
		info->sig_ok = true;
		return 0;
	}

	/* Not having a signature is only an error if we're strict. */
	if (err == -ENOKEY && !sig_enforce)
		err = 0;

	return err;
}
#else /* !CONFIG_MODULE_SIG */
static int module_sig_check(struct load_info *info, int flags)
{
	return 0;
}
#endif /* !CONFIG_MODULE_SIG */

/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
static int elf_header_check(struct load_info *info)
{
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
	    || info->hdr->e_type != ET_REL
	    || !elf_check_arch(info->hdr)
	    || info->hdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (info->hdr->e_shoff >= info->len
	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
		info->len - info->hdr->e_shoff))
		return -ENOEXEC;

	return 0;
}

#define COPY_CHUNK_SIZE (16*PAGE_SIZE)

static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
{
	do {
		unsigned long n = min(len, COPY_CHUNK_SIZE);

		if (copy_from_user(dst, usrc, n) != 0)
			return -EFAULT;
		cond_resched();
		dst += n;
		usrc += n;
		len -= n;
	} while (len);
	return 0;
}
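
/*
 * Worked example (illustrative): with 4 KiB pages COPY_CHUNK_SIZE is
 * 64 KiB, so a 1 MiB module image is copied in 16 chunks, with a
 * cond_resched() between chunks to keep scheduling latency down.
 */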

/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
				 struct load_info *info)
{
	int err;

	info->len = len;
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	err = security_kernel_module_from_file(NULL);
	if (err)
		return err;

	/* Suck in entire file: we'll want most of it. */
	info->hdr = __vmalloc(info->len,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
	if (!info->hdr)
		return -ENOMEM;

	if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
		vfree(info->hdr);
		return -EFAULT;
	}

	return 0;
}

/* Sets info->hdr and info->len. */
static int copy_module_from_fd(int fd, struct load_info *info)
{
	struct fd f = fdget(fd);
	int err;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -ENOEXEC;

	err = security_kernel_module_from_file(f.file);
	if (err)
		goto out;

	err = vfs_getattr(&f.file->f_path, &stat);
	if (err)
		goto out;

	if (stat.size > INT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		err = -EINVAL;
		goto out;
	}

	info->hdr = vmalloc(stat.size);
	if (!info->hdr) {
		err = -ENOMEM;
		goto out;
	}

	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(info->hdr);
			err = bytes;
			goto out;
		}
		if (bytes == 0)
			break;
		pos += bytes;
	}
	info->len = pos;

out:
	fdput(f);
	return err;
}

static void free_copy(struct load_info *info)
{
	vfree(info->hdr);
}

static int rewrite_section_headers(struct load_info *info, int flags)
{
	unsigned int i;

	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		if (shdr->sh_type != SHT_NOBITS
		    && info->len < shdr->sh_offset + shdr->sh_size) {
			pr_err("Module len %lu truncated\n", info->len);
			return -ENOEXEC;
		}

		/* Mark all sections sh_addr with their address in the
		   temporary image. */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

#ifndef CONFIG_MODULE_UNLOAD
		/* Don't load .exit sections */
		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
	}

	/* Track but don't keep modinfo and version sections. */
	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
		info->index.vers = 0; /* Pretend no __versions section! */
	else
		info->index.vers = find_sec(info, "__versions");
	info->index.info = find_sec(info, ".modinfo");
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	return 0;
}

/*
 * Set up our basic convenience variables (pointers to section headers,
 * search for module section index etc), and do some basic section
 * verification.
 *
 * Return the temporary module pointer (we'll replace it with the final
 * one when we move the module sections around).
 */
static struct module *setup_load_info(struct load_info *info, int flags)
{
	unsigned int i;
	int err;
	struct module *mod;

	/* Set up the convenience variables */
	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
	info->secstrings = (void *)info->hdr
		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;

	err = rewrite_section_headers(info, flags);
	if (err)
		return ERR_PTR(err);

	/* Find internal symbols and strings. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			info->index.sym = i;
			info->index.str = info->sechdrs[i].sh_link;
			info->strtab = (char *)info->hdr
				+ info->sechdrs[info->index.str].sh_offset;
			break;
		}
	}

	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
	if (!info->index.mod) {
		pr_warn("No module found in object\n");
		return ERR_PTR(-ENOEXEC);
	}
	/* This is temporary: point mod into copy of data. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;

	if (info->index.sym == 0) {
		pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
		return ERR_PTR(-ENOEXEC);
	}

	info->index.pcpu = find_pcpusec(info);

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
		return ERR_PTR(-ENOEXEC);

	return mod;
}

static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
	const char *modmagic = get_modinfo(info, "vermagic");
	int err;

	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
		modmagic = NULL;

	/* This is allowed: modprobe --force will invalidate it. */
	if (!modmagic) {
		err = try_to_force_load(mod, "bad vermagic");
		if (err)
			return err;
	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
		pr_err("%s: version magic '%s' should be '%s'\n",
		       mod->name, modmagic, vermagic);
		return -ENOEXEC;
	}

	if (!get_modinfo(info, "intree"))
		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);

	if (get_modinfo(info, "staging")) {
		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
		pr_warn("%s: module is from the staging directory, the quality "
			"is unknown, you have been warned.\n", mod->name);
	}

	/* Set up license info based on the info section */
	set_license(mod, get_modinfo(info, "license"));

	return 0;
}

static int find_module_sections(struct module *mod, struct load_info *info)
{
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(info, "__kcrctab");
	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
				     sizeof(*mod->gpl_syms),
				     &mod->num_gpl_syms);
	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
	mod->gpl_future_syms = section_objs(info,
					    "__ksymtab_gpl_future",
					    sizeof(*mod->gpl_future_syms),
					    &mod->num_gpl_future_syms);
	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");

#ifdef CONFIG_UNUSED_SYMBOLS
	mod->unused_syms = section_objs(info, "__ksymtab_unused",
					sizeof(*mod->unused_syms),
					&mod->num_unused_syms);
	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
					    sizeof(*mod->unused_gpl_syms),
					    &mod->num_unused_gpl_syms);
	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
#endif
#ifdef CONFIG_CONSTRUCTORS
	mod->ctors = section_objs(info, ".ctors",
				  sizeof(*mod->ctors), &mod->num_ctors);
	if (!mod->ctors)
		mod->ctors = section_objs(info, ".init_array",
					  sizeof(*mod->ctors), &mod->num_ctors);
	else if (find_sec(info, ".init_array")) {
		/*
		 * This shouldn't happen with same compiler and binutils
		 * building all parts of the module.
		 */
		pr_warn("%s: has both .ctors and .init_array.\n",
			mod->name);
		return -EINVAL;
	}
#endif

#ifdef CONFIG_TRACEPOINTS
	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
					     sizeof(*mod->tracepoints_ptrs),
					     &mod->num_tracepoints);
#endif
#ifdef HAVE_JUMP_LABEL
	mod->jump_entries = section_objs(info, "__jump_table",
					 sizeof(*mod->jump_entries),
					 &mod->num_jump_entries);
#endif
#ifdef CONFIG_EVENT_TRACING
	mod->trace_events = section_objs(info, "_ftrace_events",
					 sizeof(*mod->trace_events),
					 &mod->num_trace_events);
	mod->trace_enums = section_objs(info, "_ftrace_enum_map",
					sizeof(*mod->trace_enums),
					&mod->num_trace_enums);
#endif
#ifdef CONFIG_TRACING
	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
					 sizeof(*mod->trace_bprintk_fmt_start),
					 &mod->num_trace_bprintk_fmt);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
	/* sechdrs[0].sh_size is always zero */
	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
					     sizeof(*mod->ftrace_callsites),
					     &mod->num_ftrace_callsites);
#endif

	mod->extable = section_objs(info, "__ex_table",
				    sizeof(*mod->extable), &mod->num_exentries);

	if (section_addr(info, "__obsparm"))
		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);

	info->debug = section_objs(info, "__verbose",
				   sizeof(*info->debug), &info->num_debug);

	return 0;
}

static int move_module(struct module *mod, struct load_info *info)
{
	int i;
	void *ptr;

	/* Do the allocs. */
	ptr = module_alloc_update_bounds(mod->core_size);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. Just mark it as not being a
	 * leak.
	 */
	kmemleak_not_leak(ptr);
	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0, mod->core_size);
	mod->module_core = ptr;

	if (mod->init_size) {
		ptr = module_alloc_update_bounds(mod->init_size);
		/*
		 * The pointer to this block is stored in the module structure
		 * which is inside the block. This block doesn't need to be
		 * scanned as it contains data and code that will be freed
		 * after the module is initialized.
		 */
		kmemleak_ignore(ptr);
		if (!ptr) {
			module_memfree(mod->module_core);
			return -ENOMEM;
		}
		memset(ptr, 0, mod->init_size);
		mod->module_init = ptr;
	} else
		mod->module_init = NULL;

	/* Transfer each section which specifies SHF_ALLOC */
	pr_debug("final section addresses:\n");
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;

		if (shdr->sh_entsize & INIT_OFFSET_MASK)
			dest = mod->module_init
				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->module_core + shdr->sh_entsize;

		if (shdr->sh_type != SHT_NOBITS)
			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
		/* Update sh_addr to point to copy in image. */
		shdr->sh_addr = (unsigned long)dest;
		pr_debug("\t0x%lx %s\n",
			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
	}

	return 0;
}
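
/*
 * For illustration: layout_sections() stored each section's offset in
 * sh_entsize with the top bit (INIT_OFFSET_MASK) distinguishing the
 * two areas, so sh_entsize == (INIT_OFFSET_MASK | 0x40) places a
 * section 0x40 bytes into the init allocation, while a plain 0x40
 * places it 0x40 bytes into the core allocation.
 */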

static int check_module_license_and_versions(struct module *mod)
{
	/*
	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
	 * using GPL-only symbols it needs.
	 */
	if (strcmp(mod->name, "ndiswrapper") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);

	/* driverloader was caught wrongly pretending to be under GPL */
	if (strcmp(mod->name, "driverloader") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

	/* lve claims to be GPL but upstream won't provide source */
	if (strcmp(mod->name, "lve") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

#ifdef CONFIG_MODVERSIONS
	if ((mod->num_syms && !mod->crcs)
	    || (mod->num_gpl_syms && !mod->gpl_crcs)
	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
#ifdef CONFIG_UNUSED_SYMBOLS
	    || (mod->num_unused_syms && !mod->unused_crcs)
	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
#endif
		) {
		return try_to_force_load(mod,
					 "no versions for exported symbols");
	}
#endif
	return 0;
}

static void flush_module_icache(const struct module *mod)
{
	mm_segment_t old_fs;

	/* flush the icache in correct context */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	if (mod->module_init)
		flush_icache_range((unsigned long)mod->module_init,
				   (unsigned long)mod->module_init
				   + mod->init_size);
	flush_icache_range((unsigned long)mod->module_core,
			   (unsigned long)mod->module_core + mod->core_size);

	set_fs(old_fs);
}

int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
				     Elf_Shdr *sechdrs,
				     char *secstrings,
				     struct module *mod)
{
	return 0;
}

static struct module *layout_and_allocate(struct load_info *info, int flags)
{
	/* Module within temporary copy. */
	struct module *mod;
	int err;

	mod = setup_load_info(info, flags);
	if (IS_ERR(mod))
		return mod;

	err = check_modinfo(mod, info, flags);
	if (err)
		return ERR_PTR(err);

	/* Allow arches to frob section contents and sizes. */
	err = module_frob_arch_sections(info->hdr, info->sechdrs,
					info->secstrings, mod);
	if (err < 0)
		return ERR_PTR(err);

	/* We will do a special allocation for per-cpu sections later. */
	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;

	/* Determine total sizes, and put offsets in sh_entsize.  For now
	   this is done generically; there doesn't appear to be any
	   special cases for the architectures. */
	layout_sections(mod, info);
	layout_symtab(mod, info);

	/* Allocate and move to the final place */
	err = move_module(mod, info);
	if (err)
		return ERR_PTR(err);

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	return mod;
}

/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	percpu_modfree(mod);
	module_arch_freeing_init(mod);
	module_memfree(mod->module_init);
	module_memfree(mod->module_core);
}

int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}

static int post_relocation(struct module *mod, const struct load_info *info)
{
	/* Sort exception table now relocations are done. */
	sort_extable(mod->extable, mod->extable + mod->num_exentries);

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	return module_finalize(info->hdr, info->sechdrs, mod);
}

/* Is this module of this name done loading?  No locks held. */
static bool finished_loading(const char *name)
{
	struct module *mod;
	bool ret;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	mod = find_module_all(name, strlen(name), true);
	ret = !mod || mod->state == MODULE_STATE_LIVE
		|| mod->state == MODULE_STATE_GOING;
	mutex_unlock(&module_mutex);

	return ret;
}

/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long i;

	for (i = 0; i < mod->num_ctors; i++)
		mod->ctors[i]();
#endif
}

/* For freeing module_init on success, in case kallsyms is still traversing it */
struct mod_initfree {
	struct rcu_head rcu;
	void *module_init;
};

static void do_free_init(struct rcu_head *head)
{
	struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
	module_memfree(m->module_init);
	kfree(m);
}

/*
 * This is where the real work happens.
 *
 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
 * helper command 'lx-symbols'.
 */
static noinline int do_init_module(struct module *mod)
{
	int ret = 0;
	struct mod_initfree *freeinit;

	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
	if (!freeinit) {
		ret = -ENOMEM;
		goto fail;
	}
	freeinit->module_init = mod->module_init;

	/*
	 * We want to find out whether @mod uses async during init.  Clear
	 * PF_USED_ASYNC.  async_schedule*() will set it.
	 */
	current->flags &= ~PF_USED_ASYNC;

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0) {
		goto fail_free_freeinit;
	}
	if (ret > 0) {
		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
			"follow 0/-E convention\n"
			"%s: loading module anyway...\n",
			__func__, mod->name, ret, __func__);
		dump_stack();
	}

	/* Now it's a first class citizen! */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/*
	 * We need to finish all async code before the module init sequence
	 * is done.  This has potential to deadlock.  For example, a newly
	 * detected block device can trigger request_module() of the
	 * default iosched from async probing task.  Once userland helper
	 * reaches here, async_synchronize_full() will wait on the async
	 * task waiting on request_module() and deadlock.
	 *
	 * This deadlock is avoided by performing async_synchronize_full()
	 * iff module init queued any async jobs.  This isn't a full
	 * solution as it will deadlock the same if module loading from
	 * async jobs nests more than once; however, due to the various
	 * constraints, this hack seems to be the best option for now.
	 * Please refer to the following thread for details.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1420814
	 */
	if (current->flags & PF_USED_ASYNC)
		async_synchronize_full();

	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
	unset_module_init_ro_nx(mod);
	module_arch_freeing_init(mod);
	mod->module_init = NULL;
	mod->init_size = 0;
	mod->init_ro_size = 0;
	mod->init_text_size = 0;
	/*
	 * We want to free module_init, but be aware that kallsyms may be
	 * walking this with preempt disabled.  In all the failure paths,
	 * we call synchronize_rcu/synchronize_sched, but we don't want
	 * to slow down the success path, so use actual RCU here.
	 */
	call_rcu(&freeinit->rcu, do_free_init);
	mutex_unlock(&module_mutex);
	wake_up_all(&module_wq);

	return 0;

fail_free_freeinit:
	kfree(freeinit);
fail:
	/* Try to protect us from buggy refcounters. */
	mod->state = MODULE_STATE_GOING;
	synchronize_sched();
	module_put(mod);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	free_module(mod);
	wake_up_all(&module_wq);
	return ret;
}

static int may_init_module(void)
{
	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	return 0;
}

/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources.  In particular, this guards against
 * exhausting temporary percpu memory on a duplicate load.
 */
static int add_unformed_module(struct module *mod)
{
	int err;
	struct module *old;

	mod->state = MODULE_STATE_UNFORMED;

again:
	mutex_lock(&module_mutex);
	old = find_module_all(mod->name, strlen(mod->name), true);
	if (old != NULL) {
		if (old->state == MODULE_STATE_COMING
		    || old->state == MODULE_STATE_UNFORMED) {
			/* Wait in case it fails to load. */
			mutex_unlock(&module_mutex);
			err = wait_event_interruptible(module_wq,
					       finished_loading(mod->name));
			if (err)
				goto out_unlocked;
			goto again;
		}
		err = -EEXIST;
		goto out;
	}
	list_add_rcu(&mod->list, &modules);
	err = 0;

out:
	mutex_unlock(&module_mutex);
out_unlocked:
	return err;
}

static int complete_formation(struct module *mod, struct load_info *info)
{
	int err;

	mutex_lock(&module_mutex);

	/* Find duplicate symbols (must be called under lock). */
	err = verify_export_symbols(mod);
	if (err < 0)
		goto out;

	/* This relies on module_mutex for list integrity. */
	module_bug_finalize(info->hdr, info->sechdrs, mod);

	/* Set RO and NX regions for core */
	set_section_ro_nx(mod->module_core,
			  mod->core_text_size,
			  mod->core_ro_size,
			  mod->core_size);

	/* Set RO and NX regions for init */
	set_section_ro_nx(mod->module_init,
			  mod->init_text_size,
			  mod->init_ro_size,
			  mod->init_size);

	/* Mark state as coming so strong_try_module_get() ignores us,
	 * but kallsyms etc. can see us. */
	mod->state = MODULE_STATE_COMING;
	mutex_unlock(&module_mutex);

	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_COMING, mod);
	return 0;

out:
	mutex_unlock(&module_mutex);
	return err;
}

static int unknown_module_param_cb(char *param, char *val, const char *modname)
{
	/* Check for magic 'dyndbg' arg */
	int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
	if (ret != 0)
		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
	return 0;
}
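
/*
 * Illustrative example: loading with "modprobe foo dyndbg=+p bar=1"
 * routes the magic "dyndbg" argument to dynamic debug, while an
 * unrecognized "bar" only triggers the warning above; neither case
 * aborts the load.
 */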

/* Allocate and load the module: note that size of section 0 is always
   zero, and we rely on this for optional sections. */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	long err;
	char *after_dashes;

	err = module_sig_check(info, flags);
	if (err)
		goto free_copy;

	err = elf_header_check(info);
	if (err)
		goto free_copy;

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

#ifdef CONFIG_MODULE_SIG
	mod->sig_ok = info->sig_ok;
	if (!mod->sig_ok) {
		pr_notice_once("%s: module verification failed: signature "
			       "and/or required key missing - tainting "
			       "kernel\n", mod->name);
		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
	}
#endif

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	/* Now we've got everything in the final locations, we can
	 * find optional sections. */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	dynamic_debug_setup(info->debug, info->num_debug);

	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
	ftrace_module_init(mod);

	/* Finally it's fully formed, ready to start executing. */
	err = complete_formation(mod, info);
	if (err)
		goto ddebug_cleanup;

	/* Module is ready to execute: parsing args may do that. */
	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
				  -32768, 32767, unknown_module_param_cb);
	if (IS_ERR(after_dashes)) {
		err = PTR_ERR(after_dashes);
		goto bug_cleanup;
	} else if (after_dashes) {
		pr_warn("%s: parameters '%s' after `--' ignored\n",
			mod->name, after_dashes);
	}

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto bug_cleanup;

	/* Get rid of temporary copy. */
	free_copy(info);

	/* Done! */
	trace_module_load(mod);

	return do_init_module(mod);

 bug_cleanup:
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);

	/* we can't deallocate the module until we clear memory protection */
	unset_module_init_ro_nx(mod);
	unset_module_core_ro_nx(mod);

 ddebug_cleanup:
	dynamic_debug_remove(info->debug);
	synchronize_sched();
	kfree(mod->args);
 free_arch_cleanup:
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	wake_up_all(&module_wq);
	/* Wait for RCU synchronizing before releasing mod->list. */
	synchronize_rcu();
	mutex_unlock(&module_mutex);
 free_module:
	/* Free lock-classes; relies on the preceding sync_rcu() */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	module_deallocate(mod, info);
 free_copy:
	free_copy(info);
	return err;
}

SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
		 umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err)
		return err;

	return load_module(&info, uargs, 0);
}

SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC))
		return -EINVAL;

	err = copy_module_from_fd(fd, &info);
	if (err)
		return err;

	return load_module(&info, uargs, flags);
}
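
/*
 * Illustrative userspace usage (assumed, not part of this file):
 *
 *   int fd = open("/lib/modules/.../foo.ko", O_RDONLY);
 *   syscall(SYS_finit_module, fd, "param=1", 0);
 *
 * Passing a file descriptor lets module tools load a .ko without first
 * reading the whole image into a userspace buffer.
 */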

static inline int within(unsigned long addr, void *start, unsigned long size)
{
	return ((void *)addr >= start && (void *)addr < start + size);
}

#ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d.
 */
static inline int is_arm_mapping_symbol(const char *str)
{
	if (str[0] == '.' && str[1] == 'L')
		return true;
	return str[0] == '$' && strchr("axtd", str[1])
	       && (str[2] == '\0' || str[2] == '.');
}
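
/*
 * Examples (illustrative): "$a", "$t.2" and "$d" are filtered out, as
 * are local labels such as ".L123"; a regular symbol like "$afoo" is
 * kept because of the third-character check.
 */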

static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
{
	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
}

static const char *get_ksymbol(struct module *mod,
			       unsigned long addr,
			       unsigned long *size,
			       unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	/* At worst, next value is at end of module */
	if (within_module_init(addr, mod))
		nextval = (unsigned long)mod->module_init+mod->init_text_size;
	else
		nextval = (unsigned long)mod->module_core+mod->core_text_size;

	/* Scan for closest preceding symbol, and next symbol. (ELF
	   starts real symbols at 1). */
	for (i = 1; i < kallsyms->num_symtab; i++) {
		if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
			continue;

		/* We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim. */
		if (*symname(kallsyms, i) == '\0'
		    || is_arm_mapping_symbol(symname(kallsyms, i)))
			continue;

		if (kallsyms->symtab[i].st_value <= addr
		    && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
			best = i;
		if (kallsyms->symtab[i].st_value > addr
		    && kallsyms->symtab[i].st_value < nextval)
			nextval = kallsyms->symtab[i].st_value;
	}

	if (!best)
		return NULL;

	if (size)
		*size = nextval - kallsyms->symtab[best].st_value;
	if (offset)
		*offset = addr - kallsyms->symtab[best].st_value;
	return symname(kallsyms, best);
}

/* For kallsyms to ask for address resolution.  NULL means not found.  Careful
 * not to lock to avoid deadlock on oopses, simply disable preemption. */
const char *module_address_lookup(unsigned long addr,
				  unsigned long *size,
				  unsigned long *offset,
				  char **modname,
				  char *namebuf)
{
	struct module *mod;
	const char *ret = NULL;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			if (modname)
				*modname = mod->name;
			ret = get_ksymbol(mod, addr, size, offset);
			break;
		}
	}
	/* Make a copy in here where it's safe */
	if (ret) {
		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
		ret = namebuf;
	}
	preempt_enable();
	return ret;
}

int lookup_module_symbol_name(unsigned long addr, char *symname)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, NULL, NULL);
			if (!sym)
				goto out;
			strlcpy(symname, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
			       unsigned long *offset, char *modname, char *name)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, size, offset);
			if (!sym)
				goto out;
			if (modname)
				strlcpy(modname, mod->name, MODULE_NAME_LEN);
			if (name)
				strlcpy(name, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *name, char *module_name, int *exported)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		struct mod_kallsyms *kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		kallsyms = rcu_dereference_sched(mod->kallsyms);
		if (symnum < kallsyms->num_symtab) {
			*value = kallsyms->symtab[symnum].st_value;
			*type = kallsyms->symtab[symnum].st_info;
			strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
			*exported = is_exported(name, *value, mod);
			preempt_enable();
			return 0;
		}
		symnum -= kallsyms->num_symtab;
	}
	preempt_enable();
	return -ERANGE;
}

static unsigned long mod_find_symname(struct module *mod, const char *name)
{
	unsigned int i;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	for (i = 0; i < kallsyms->num_symtab; i++)
		if (strcmp(name, symname(kallsyms, i)) == 0 &&
		    kallsyms->symtab[i].st_info != 'U')
			return kallsyms->symtab[i].st_value;
	return 0;
}

/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name)
{
	struct module *mod;
	char *colon;
	unsigned long ret = 0;

	/* Don't lock: we're in enough trouble already. */
	preempt_disable();
	if ((colon = strchr(name, ':')) != NULL) {
		if ((mod = find_module_all(name, colon - name, false)) != NULL)
			ret = mod_find_symname(mod, colon+1);
	} else {
		list_for_each_entry_rcu(mod, &modules, list) {
			if (mod->state == MODULE_STATE_UNFORMED)
				continue;
			if ((ret = mod_find_symname(mod, name)) != 0)
				break;
		}
	}
	preempt_enable();
	return ret;
}

int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	list_for_each_entry(mod, &modules, list) {
		/* We hold module_mutex: no need for rcu_dereference_sched */
		struct mod_kallsyms *kallsyms = mod->kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		for (i = 0; i < kallsyms->num_symtab; i++) {
			ret = fn(data, symname(kallsyms, i),
				 mod, kallsyms->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
#endif /* CONFIG_KALLSYMS */

static char *module_flags(struct module *mod, char *buf)
{
	int bx = 0;

	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		bx += module_flags_taint(mod, buf + bx);
		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
	buf[bx] = '\0';

	return buf;
}
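
/*
 * Example (illustrative): a proprietary (P) out-of-tree (O) module
 * that is currently being unloaded would render as "(PO-)"; an
 * untainted live module yields an empty string.
 */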

#ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}

static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &modules, pos);
}

static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}

static int m_show(struct seq_file *m, void *p)
{
	struct module *mod = list_entry(p, struct module, list);
	char buf[8];

	/* We always ignore unformed modules. */
	if (mod->state == MODULE_STATE_UNFORMED)
		return 0;

	seq_printf(m, "%s %u",
		   mod->name, mod->init_size + mod->core_size);
	print_unload_info(m, mod);

	/* Informative for users. */
	seq_printf(m, " %s",
		   mod->state == MODULE_STATE_GOING ? "Unloading" :
		   mod->state == MODULE_STATE_COMING ? "Loading" :
		   "Live");
	/* Used by oprofile and other similar tools. */
	seq_printf(m, " 0x%pK", mod->module_core);

	/* Taints info */
	if (mod->taints)
		seq_printf(m, " %s", module_flags(mod, buf));

	seq_puts(m, "\n");
	return 0;
}

/* Format: modulename size refcount deps address

   Where refcount is a number or -, and deps is a comma-separated list
   of depends or -.
*/
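
/* Illustrative line (values invented for the example):

     nfsd 310255 13 - Live 0xffffffffa03b1000

   A taint suffix from module_flags() is appended when the module is
   tainted.
*/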

static const struct seq_operations modules_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};

static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}

static const struct file_operations proc_modules_operations = {
	.open		= modules_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proc_modules_init(void)
{
	proc_create("modules", 0, NULL, &proc_modules_operations);
	return 0;
}
module_init(proc_modules_init);
#endif

/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (mod->num_exentries == 0)
			continue;

		e = search_extable(mod->extable,
				   mod->extable + mod->num_exentries - 1,
				   addr);
		if (e)
			break;
	}
	preempt_enable();

	/* Now, if we found one, we are running inside it now, hence
	   we cannot unload the module, hence no refcnt needed. */
	return e;
}

/**
 * is_module_address - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/**
 * __module_address - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod))
			return mod;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__module_address);

/**
 * is_module_text_address - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module.  See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_text_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/**
 * __module_text_address - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod = __module_address(addr);
	if (mod) {
		/* Make sure it's within the text section. */
		if (!within(addr, mod->module_init, mod->init_text_size)
		    && !within(addr, mod->module_core, mod->core_text_size))
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_text_address);

/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
	struct module *mod;
	char buf[8];

	printk(KERN_DEFAULT "Modules linked in:");
	/* Most callers should already have preempt disabled, but make sure */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
	}
	preempt_enable();
	if (last_unloaded_module[0])
		pr_cont(" [last unloaded: %s]", last_unloaded_module);
	pr_cont("\n");
}

#ifdef CONFIG_MODVERSIONS
/* Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module. */
void module_layout(struct module *mod,
		   struct modversion_info *ver,
		   struct kernel_param *kp,
		   struct kernel_symbol *ks,
		   struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
#endif