kernel/module.c
/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif
/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif
/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies.
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))
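/*
 * Worked example (illustrative only): with 4 KiB pages, a 16-byte region
 * starting 8 bytes before a page boundary ends on the following page, so
 * PFN_DOWN(end) - PFN_DOWN(base) + 1 evaluates to 2, while the same 16
 * bytes placed entirely within one page evaluates to 1.
 */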
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete uses stop_machine/add uses RCU list operations.)
 */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */
#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
#else
static bool sig_enforce = false;

static int param_set_bool_enable_only(const char *val,
				      const struct kernel_param *kp)
{
	int err;
	bool test;
	struct kernel_param dummy_kp = *kp;

	dummy_kp.arg = &test;

	err = param_set_bool(val, &dummy_kp);
	if (err)
		return err;

	/* Don't let them unset it once it's set! */
	if (!test && sig_enforce)
		return -EROFS;

	if (test)
		sig_enforce = true;
	return 0;
}

static const struct kernel_param_ops param_ops_bool_enable_only = {
	.flags = KERNEL_PARAM_OPS_FL_NOARG,
	.set = param_set_bool_enable_only,
	.get = param_get_bool,
};
#define param_check_bool_enable_only param_check_bool

module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */
#endif /* CONFIG_MODULE_SIG */
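/*
 * Note (summary of the block above): without CONFIG_MODULE_SIG_FORCE,
 * sig_enforce starts out false and may be flipped to true through the
 * module parameter, but param_set_bool_enable_only() refuses to clear it
 * again, so signature enforcement can only ever be turned on at run time,
 * never back off.
 */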
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Bounds of module allocation, for speeding __module_address.
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

int register_module_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
#ifdef CONFIG_KALLSYMS
	unsigned long mod_kallsyms_init_off;
#endif
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};
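/*
 * A rough map of the index members above, based on how they are used later
 * in this file: sym/str are the symbol table section and its string table,
 * mod is the section holding the struct module image, vers is __versions,
 * info is .modinfo, and pcpu is the .data..percpu section.
 */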
190 /* We require a truly strong try_module_get(): 0 means failure due to
191 ongoing or failed initialization etc. */
192 static inline int strong_try_module_get(struct module *mod)
194 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
195 if (mod && mod->state == MODULE_STATE_COMING)
196 return -EBUSY;
197 if (try_module_get(mod))
198 return 0;
199 else
200 return -ENOENT;
203 static inline void add_taint_module(struct module *mod, unsigned flag,
204 enum lockdep_ok lockdep_ok)
206 add_taint(flag, lockdep_ok);
207 mod->taints |= (1U << flag);
211 * A thread that wants to hold a reference to a module only while it
212 * is running can call this to safely exit. nfsd and lockd use this.
214 void __module_put_and_exit(struct module *mod, long code)
216 module_put(mod);
217 do_exit(code);
219 EXPORT_SYMBOL(__module_put_and_exit);
221 /* Find a module section: 0 means not found. */
222 static unsigned int find_sec(const struct load_info *info, const char *name)
224 unsigned int i;
226 for (i = 1; i < info->hdr->e_shnum; i++) {
227 Elf_Shdr *shdr = &info->sechdrs[i];
228 /* Alloc bit cleared means "ignore it." */
229 if ((shdr->sh_flags & SHF_ALLOC)
230 && strcmp(info->secstrings + shdr->sh_name, name) == 0)
231 return i;
233 return 0;
236 /* Find a module section, or NULL. */
237 static void *section_addr(const struct load_info *info, const char *name)
239 /* Section 0 has sh_addr 0. */
240 return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
243 /* Find a module section, or NULL. Fill in number of "objects" in section. */
244 static void *section_objs(const struct load_info *info,
245 const char *name,
246 size_t object_size,
247 unsigned int *num)
249 unsigned int sec = find_sec(info, name);
251 /* Section 0 has sh_addr 0 and sh_size 0. */
252 *num = info->sechdrs[sec].sh_size / object_size;
253 return (void *)info->sechdrs[sec].sh_addr;
256 /* Provided by the linker */
257 extern const struct kernel_symbol __start___ksymtab[];
258 extern const struct kernel_symbol __stop___ksymtab[];
259 extern const struct kernel_symbol __start___ksymtab_gpl[];
260 extern const struct kernel_symbol __stop___ksymtab_gpl[];
261 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
262 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
263 extern const unsigned long __start___kcrctab[];
264 extern const unsigned long __start___kcrctab_gpl[];
265 extern const unsigned long __start___kcrctab_gpl_future[];
266 #ifdef CONFIG_UNUSED_SYMBOLS
267 extern const struct kernel_symbol __start___ksymtab_unused[];
268 extern const struct kernel_symbol __stop___ksymtab_unused[];
269 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
270 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
271 extern const unsigned long __start___kcrctab_unused[];
272 extern const unsigned long __start___kcrctab_unused_gpl[];
273 #endif
275 #ifndef CONFIG_MODVERSIONS
276 #define symversion(base, idx) NULL
277 #else
278 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
279 #endif
281 static bool each_symbol_in_section(const struct symsearch *arr,
282 unsigned int arrsize,
283 struct module *owner,
284 bool (*fn)(const struct symsearch *syms,
285 struct module *owner,
286 void *data),
287 void *data)
289 unsigned int j;
291 for (j = 0; j < arrsize; j++) {
292 if (fn(&arr[j], owner, data))
293 return true;
296 return false;
299 /* Returns true as soon as fn returns true, otherwise false. */
300 bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
301 struct module *owner,
302 void *data),
303 void *data)
305 struct module *mod;
306 static const struct symsearch arr[] = {
307 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
308 NOT_GPL_ONLY, false },
309 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
310 __start___kcrctab_gpl,
311 GPL_ONLY, false },
312 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
313 __start___kcrctab_gpl_future,
314 WILL_BE_GPL_ONLY, false },
315 #ifdef CONFIG_UNUSED_SYMBOLS
316 { __start___ksymtab_unused, __stop___ksymtab_unused,
317 __start___kcrctab_unused,
318 NOT_GPL_ONLY, true },
319 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
320 __start___kcrctab_unused_gpl,
321 GPL_ONLY, true },
322 #endif
325 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
326 return true;
328 list_for_each_entry_rcu(mod, &modules, list) {
329 struct symsearch arr[] = {
330 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
331 NOT_GPL_ONLY, false },
332 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
333 mod->gpl_crcs,
334 GPL_ONLY, false },
335 { mod->gpl_future_syms,
336 mod->gpl_future_syms + mod->num_gpl_future_syms,
337 mod->gpl_future_crcs,
338 WILL_BE_GPL_ONLY, false },
339 #ifdef CONFIG_UNUSED_SYMBOLS
340 { mod->unused_syms,
341 mod->unused_syms + mod->num_unused_syms,
342 mod->unused_crcs,
343 NOT_GPL_ONLY, true },
344 { mod->unused_gpl_syms,
345 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
346 mod->unused_gpl_crcs,
347 GPL_ONLY, true },
348 #endif
351 if (mod->state == MODULE_STATE_UNFORMED)
352 continue;
354 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
355 return true;
357 return false;
359 EXPORT_SYMBOL_GPL(each_symbol_section);
361 struct find_symbol_arg {
362 /* Input */
363 const char *name;
364 bool gplok;
365 bool warn;
367 /* Output */
368 struct module *owner;
369 const unsigned long *crc;
370 const struct kernel_symbol *sym;
373 static bool check_symbol(const struct symsearch *syms,
374 struct module *owner,
375 unsigned int symnum, void *data)
377 struct find_symbol_arg *fsa = data;
379 if (!fsa->gplok) {
380 if (syms->licence == GPL_ONLY)
381 return false;
382 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
383 pr_warn("Symbol %s is being used by a non-GPL module, "
384 "which will not be allowed in the future\n",
385 fsa->name);
#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif
401 fsa->owner = owner;
402 fsa->crc = symversion(syms->crcs, symnum);
403 fsa->sym = &syms->start[symnum];
404 return true;
static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}
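/*
 * cmp_name() is the comparator for the bsearch() in
 * find_symbol_in_section() below; that bsearch() is only valid because the
 * exported-symbol tables it searches are already sorted by symbol name
 * (a build/link-time property the search relies on).
 */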
415 static bool find_symbol_in_section(const struct symsearch *syms,
416 struct module *owner,
417 void *data)
419 struct find_symbol_arg *fsa = data;
420 struct kernel_symbol *sym;
422 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
423 sizeof(struct kernel_symbol), cmp_name);
425 if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
426 return true;
428 return false;
/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
433 const struct kernel_symbol *find_symbol(const char *name,
434 struct module **owner,
435 const unsigned long **crc,
436 bool gplok,
437 bool warn)
439 struct find_symbol_arg fsa;
441 fsa.name = name;
442 fsa.gplok = gplok;
443 fsa.warn = warn;
445 if (each_symbol_section(find_symbol_in_section, &fsa)) {
446 if (owner)
447 *owner = fsa.owner;
448 if (crc)
449 *crc = fsa.crc;
450 return fsa.sym;
453 pr_debug("Failed to find symbol %s\n", name);
454 return NULL;
456 EXPORT_SYMBOL_GPL(find_symbol);
458 /* Search for module by name: must hold module_mutex. */
459 static struct module *find_module_all(const char *name, size_t len,
460 bool even_unformed)
462 struct module *mod;
464 list_for_each_entry(mod, &modules, list) {
465 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
466 continue;
467 if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
468 return mod;
470 return NULL;
473 struct module *find_module(const char *name)
475 return find_module_all(name, strlen(name), false);
477 EXPORT_SYMBOL_GPL(find_module);
479 #ifdef CONFIG_SMP
481 static inline void __percpu *mod_percpu(struct module *mod)
483 return mod->percpu;
486 static int percpu_modalloc(struct module *mod, struct load_info *info)
488 Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
489 unsigned long align = pcpusec->sh_addralign;
491 if (!pcpusec->sh_size)
492 return 0;
494 if (align > PAGE_SIZE) {
495 pr_warn("%s: per-cpu alignment %li > %li\n",
496 mod->name, align, PAGE_SIZE);
497 align = PAGE_SIZE;
500 mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
501 if (!mod->percpu) {
502 pr_warn("%s: Could not allocate %lu bytes percpu data\n",
503 mod->name, (unsigned long)pcpusec->sh_size);
504 return -ENOMEM;
506 mod->percpu_size = pcpusec->sh_size;
507 return 0;
510 static void percpu_modfree(struct module *mod)
512 free_percpu(mod->percpu);
515 static unsigned int find_pcpusec(struct load_info *info)
517 return find_sec(info, ".data..percpu");
520 static void percpu_modcopy(struct module *mod,
521 const void *from, unsigned long size)
523 int cpu;
525 for_each_possible_cpu(cpu)
526 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
530 * is_module_percpu_address - test whether address is from module static percpu
531 * @addr: address to test
533 * Test whether @addr belongs to module static percpu area.
535 * RETURNS:
536 * %true if @addr is from module static percpu area
538 bool is_module_percpu_address(unsigned long addr)
540 struct module *mod;
541 unsigned int cpu;
543 preempt_disable();
545 list_for_each_entry_rcu(mod, &modules, list) {
546 if (mod->state == MODULE_STATE_UNFORMED)
547 continue;
548 if (!mod->percpu_size)
549 continue;
550 for_each_possible_cpu(cpu) {
551 void *start = per_cpu_ptr(mod->percpu, cpu);
553 if ((void *)addr >= start &&
554 (void *)addr < start + mod->percpu_size) {
555 preempt_enable();
556 return true;
561 preempt_enable();
562 return false;
565 #else /* ... !CONFIG_SMP */
567 static inline void __percpu *mod_percpu(struct module *mod)
569 return NULL;
571 static int percpu_modalloc(struct module *mod, struct load_info *info)
573 /* UP modules shouldn't have this section: ENOMEM isn't quite right */
574 if (info->sechdrs[info->index.pcpu].sh_size != 0)
575 return -ENOMEM;
576 return 0;
578 static inline void percpu_modfree(struct module *mod)
581 static unsigned int find_pcpusec(struct load_info *info)
583 return 0;
585 static inline void percpu_modcopy(struct module *mod,
586 const void *from, unsigned long size)
588 /* pcpusec should be 0, and size of that section should be 0. */
589 BUG_ON(size != 0);
591 bool is_module_percpu_address(unsigned long addr)
593 return false;
596 #endif /* CONFIG_SMP */
598 #define MODINFO_ATTR(field) \
599 static void setup_modinfo_##field(struct module *mod, const char *s) \
601 mod->field = kstrdup(s, GFP_KERNEL); \
603 static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
604 struct module_kobject *mk, char *buffer) \
606 return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
608 static int modinfo_##field##_exists(struct module *mod) \
610 return mod->field != NULL; \
612 static void free_modinfo_##field(struct module *mod) \
614 kfree(mod->field); \
615 mod->field = NULL; \
617 static struct module_attribute modinfo_##field = { \
618 .attr = { .name = __stringify(field), .mode = 0444 }, \
619 .show = show_modinfo_##field, \
620 .setup = setup_modinfo_##field, \
621 .test = modinfo_##field##_exists, \
622 .free = free_modinfo_##field, \
625 MODINFO_ATTR(version);
626 MODINFO_ATTR(srcversion);
628 static char last_unloaded_module[MODULE_NAME_LEN+1];
630 #ifdef CONFIG_MODULE_UNLOAD
632 EXPORT_TRACEPOINT_SYMBOL(module_get);
634 /* Init the unload section of the module. */
635 static int module_unload_init(struct module *mod)
637 mod->refptr = alloc_percpu(struct module_ref);
638 if (!mod->refptr)
639 return -ENOMEM;
641 INIT_LIST_HEAD(&mod->source_list);
642 INIT_LIST_HEAD(&mod->target_list);
644 /* Hold reference count during initialization. */
645 raw_cpu_write(mod->refptr->incs, 1);
647 return 0;
650 /* Does a already use b? */
651 static int already_uses(struct module *a, struct module *b)
653 struct module_use *use;
655 list_for_each_entry(use, &b->source_list, source_list) {
656 if (use->source == a) {
657 pr_debug("%s uses %s!\n", a->name, b->name);
658 return 1;
661 pr_debug("%s does not use %s!\n", a->name, b->name);
662 return 0;
666 * Module a uses b
667 * - we add 'a' as a "source", 'b' as a "target" of module use
668 * - the module_use is added to the list of 'b' sources (so
669 * 'b' can walk the list to see who sourced them), and of 'a'
670 * targets (so 'a' can see what modules it targets).
672 static int add_module_usage(struct module *a, struct module *b)
674 struct module_use *use;
676 pr_debug("Allocating new usage for %s.\n", a->name);
677 use = kmalloc(sizeof(*use), GFP_ATOMIC);
678 if (!use) {
679 pr_warn("%s: out of memory loading\n", a->name);
680 return -ENOMEM;
683 use->source = a;
684 use->target = b;
685 list_add(&use->source_list, &b->source_list);
686 list_add(&use->target_list, &a->target_list);
687 return 0;
690 /* Module a uses b: caller needs module_mutex() */
691 int ref_module(struct module *a, struct module *b)
693 int err;
695 if (b == NULL || already_uses(a, b))
696 return 0;
698 /* If module isn't available, we fail. */
699 err = strong_try_module_get(b);
700 if (err)
701 return err;
703 err = add_module_usage(a, b);
704 if (err) {
705 module_put(b);
706 return err;
708 return 0;
710 EXPORT_SYMBOL_GPL(ref_module);
712 /* Clear the unload stuff of the module. */
713 static void module_unload_free(struct module *mod)
715 struct module_use *use, *tmp;
717 mutex_lock(&module_mutex);
718 list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
719 struct module *i = use->target;
720 pr_debug("%s unusing %s\n", mod->name, i->name);
721 module_put(i);
722 list_del(&use->source_list);
723 list_del(&use->target_list);
724 kfree(use);
726 mutex_unlock(&module_mutex);
728 free_percpu(mod->refptr);
#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
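/*
 * Note on the flags above: the flags argument of the delete_module()
 * syscall reuses open(2) flag bits; the only one consulted here is O_TRUNC,
 * which means "force the unload even though it looks unsafe" and taints
 * the kernel with TAINT_FORCED_RMMOD.
 */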
746 struct stopref
748 struct module *mod;
749 int flags;
750 int *forced;
753 /* Whole machine is stopped with interrupts off when this runs. */
754 static int __try_stop_module(void *_sref)
756 struct stopref *sref = _sref;
758 /* If it's not unused, quit unless we're forcing. */
759 if (module_refcount(sref->mod) != 0) {
760 if (!(*sref->forced = try_force_unload(sref->flags)))
761 return -EWOULDBLOCK;
764 /* Mark it as dying. */
765 sref->mod->state = MODULE_STATE_GOING;
766 return 0;
769 static int try_stop_module(struct module *mod, int flags, int *forced)
771 struct stopref sref = { mod, flags, forced };
773 return stop_machine(__try_stop_module, &sref, NULL);
776 unsigned long module_refcount(struct module *mod)
778 unsigned long incs = 0, decs = 0;
779 int cpu;
781 for_each_possible_cpu(cpu)
782 decs += per_cpu_ptr(mod->refptr, cpu)->decs;
784 * ensure the incs are added up after the decs.
785 * module_put ensures incs are visible before decs with smp_wmb.
787 * This 2-count scheme avoids the situation where the refcount
788 * for CPU0 is read, then CPU0 increments the module refcount,
789 * then CPU1 drops that refcount, then the refcount for CPU1 is
790 * read. We would record a decrement but not its corresponding
791 * increment so we would see a low count (disaster).
793 * Rare situation? But module_refcount can be preempted, and we
794 * might be tallying up 4096+ CPUs. So it is not impossible.
796 smp_rmb();
797 for_each_possible_cpu(cpu)
798 incs += per_cpu_ptr(mod->refptr, cpu)->incs;
799 return incs - decs;
801 EXPORT_SYMBOL(module_refcount);
803 /* This exists whether we can unload or not */
804 static void free_module(struct module *mod);
806 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
807 unsigned int, flags)
809 struct module *mod;
810 char name[MODULE_NAME_LEN];
811 int ret, forced = 0;
813 if (!capable(CAP_SYS_MODULE) || modules_disabled)
814 return -EPERM;
816 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
817 return -EFAULT;
818 name[MODULE_NAME_LEN-1] = '\0';
820 if (mutex_lock_interruptible(&module_mutex) != 0)
821 return -EINTR;
823 mod = find_module(name);
824 if (!mod) {
825 ret = -ENOENT;
826 goto out;
829 if (!list_empty(&mod->source_list)) {
830 /* Other modules depend on us: get rid of them first. */
831 ret = -EWOULDBLOCK;
832 goto out;
835 /* Doing init or already dying? */
836 if (mod->state != MODULE_STATE_LIVE) {
837 /* FIXME: if (force), slam module count damn the torpedoes */
838 pr_debug("%s already dying\n", mod->name);
839 ret = -EBUSY;
840 goto out;
843 /* If it has an init func, it must have an exit func to unload */
844 if (mod->init && !mod->exit) {
845 forced = try_force_unload(flags);
846 if (!forced) {
847 /* This module can't be removed */
848 ret = -EBUSY;
849 goto out;
853 /* Stop the machine so refcounts can't move and disable module. */
854 ret = try_stop_module(mod, flags, &forced);
855 if (ret != 0)
856 goto out;
858 mutex_unlock(&module_mutex);
859 /* Final destruction now no one is using it. */
860 if (mod->exit != NULL)
861 mod->exit();
862 blocking_notifier_call_chain(&module_notify_list,
863 MODULE_STATE_GOING, mod);
864 async_synchronize_full();
866 /* Store the name of the last unloaded module for diagnostic purposes */
867 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
869 free_module(mod);
870 return 0;
871 out:
872 mutex_unlock(&module_mutex);
873 return ret;
876 static inline void print_unload_info(struct seq_file *m, struct module *mod)
878 struct module_use *use;
879 int printed_something = 0;
881 seq_printf(m, " %lu ", module_refcount(mod));
883 /* Always include a trailing , so userspace can differentiate
884 between this and the old multi-field proc format. */
885 list_for_each_entry(use, &mod->source_list, source_list) {
886 printed_something = 1;
887 seq_printf(m, "%s,", use->source->name);
890 if (mod->init != NULL && mod->exit == NULL) {
891 printed_something = 1;
892 seq_printf(m, "[permanent],");
895 if (!printed_something)
896 seq_printf(m, "-");
899 void __symbol_put(const char *symbol)
901 struct module *owner;
903 preempt_disable();
904 if (!find_symbol(symbol, &owner, NULL, true, false))
905 BUG();
906 module_put(owner);
907 preempt_enable();
909 EXPORT_SYMBOL(__symbol_put);
911 /* Note this assumes addr is a function, which it currently always is. */
912 void symbol_put_addr(void *addr)
914 struct module *modaddr;
915 unsigned long a = (unsigned long)dereference_function_descriptor(addr);
917 if (core_kernel_text(a))
918 return;
921 * Even though we hold a reference on the module; we still need to
922 * disable preemption in order to safely traverse the data structure.
924 preempt_disable();
925 modaddr = __module_text_address(a);
926 BUG_ON(!modaddr);
927 module_put(modaddr);
928 preempt_enable();
930 EXPORT_SYMBOL_GPL(symbol_put_addr);
932 static ssize_t show_refcnt(struct module_attribute *mattr,
933 struct module_kobject *mk, char *buffer)
935 return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
938 static struct module_attribute modinfo_refcnt =
939 __ATTR(refcnt, 0444, show_refcnt, NULL);
941 void __module_get(struct module *module)
943 if (module) {
944 preempt_disable();
945 __this_cpu_inc(module->refptr->incs);
946 trace_module_get(module, _RET_IP_);
947 preempt_enable();
950 EXPORT_SYMBOL(__module_get);
952 bool try_module_get(struct module *module)
954 bool ret = true;
956 if (module) {
957 preempt_disable();
959 if (likely(module_is_live(module))) {
960 __this_cpu_inc(module->refptr->incs);
961 trace_module_get(module, _RET_IP_);
962 } else
963 ret = false;
965 preempt_enable();
967 return ret;
969 EXPORT_SYMBOL(try_module_get);
971 void module_put(struct module *module)
973 if (module) {
974 preempt_disable();
975 smp_wmb(); /* see comment in module_refcount */
976 __this_cpu_inc(module->refptr->decs);
978 trace_module_put(module, _RET_IP_);
979 preempt_enable();
982 EXPORT_SYMBOL(module_put);
984 #else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using it. */
	seq_printf(m, " - -");
}
991 static inline void module_unload_free(struct module *mod)
995 int ref_module(struct module *a, struct module *b)
997 return strong_try_module_get(b);
999 EXPORT_SYMBOL_GPL(ref_module);
1001 static inline int module_unload_init(struct module *mod)
1003 return 0;
1005 #endif /* CONFIG_MODULE_UNLOAD */
1007 static size_t module_flags_taint(struct module *mod, char *buf)
1009 size_t l = 0;
1011 if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
1012 buf[l++] = 'P';
1013 if (mod->taints & (1 << TAINT_OOT_MODULE))
1014 buf[l++] = 'O';
1015 if (mod->taints & (1 << TAINT_FORCED_MODULE))
1016 buf[l++] = 'F';
1017 if (mod->taints & (1 << TAINT_CRAP))
1018 buf[l++] = 'C';
1019 if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
1020 buf[l++] = 'E';
1022 * TAINT_FORCED_RMMOD: could be added.
1023 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
1024 * apply to modules.
1026 return l;
1029 static ssize_t show_initstate(struct module_attribute *mattr,
1030 struct module_kobject *mk, char *buffer)
1032 const char *state = "unknown";
1034 switch (mk->mod->state) {
1035 case MODULE_STATE_LIVE:
1036 state = "live";
1037 break;
1038 case MODULE_STATE_COMING:
1039 state = "coming";
1040 break;
1041 case MODULE_STATE_GOING:
1042 state = "going";
1043 break;
1044 default:
1045 BUG();
1047 return sprintf(buffer, "%s\n", state);
1050 static struct module_attribute modinfo_initstate =
1051 __ATTR(initstate, 0444, show_initstate, NULL);
1053 static ssize_t store_uevent(struct module_attribute *mattr,
1054 struct module_kobject *mk,
1055 const char *buffer, size_t count)
1057 enum kobject_action action;
1059 if (kobject_action_type(buffer, count, &action) == 0)
1060 kobject_uevent(&mk->kobj, action);
1061 return count;
1064 struct module_attribute module_uevent =
1065 __ATTR(uevent, 0200, NULL, store_uevent);
1067 static ssize_t show_coresize(struct module_attribute *mattr,
1068 struct module_kobject *mk, char *buffer)
1070 return sprintf(buffer, "%u\n", mk->mod->core_size);
1073 static struct module_attribute modinfo_coresize =
1074 __ATTR(coresize, 0444, show_coresize, NULL);
1076 static ssize_t show_initsize(struct module_attribute *mattr,
1077 struct module_kobject *mk, char *buffer)
1079 return sprintf(buffer, "%u\n", mk->mod->init_size);
1082 static struct module_attribute modinfo_initsize =
1083 __ATTR(initsize, 0444, show_initsize, NULL);
1085 static ssize_t show_taint(struct module_attribute *mattr,
1086 struct module_kobject *mk, char *buffer)
1088 size_t l;
1090 l = module_flags_taint(mk->mod, buffer);
1091 buffer[l++] = '\n';
1092 return l;
1095 static struct module_attribute modinfo_taint =
1096 __ATTR(taint, 0444, show_taint, NULL);
1098 static struct module_attribute *modinfo_attrs[] = {
1099 &module_uevent,
1100 &modinfo_version,
1101 &modinfo_srcversion,
1102 &modinfo_initstate,
1103 &modinfo_coresize,
1104 &modinfo_initsize,
1105 &modinfo_taint,
1106 #ifdef CONFIG_MODULE_UNLOAD
1107 &modinfo_refcnt,
1108 #endif
1109 NULL,
1112 static const char vermagic[] = VERMAGIC_STRING;
1114 static int try_to_force_load(struct module *mod, const char *reason)
1116 #ifdef CONFIG_MODULE_FORCE_LOAD
1117 if (!test_taint(TAINT_FORCED_MODULE))
1118 pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
1119 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
1120 return 0;
1121 #else
1122 return -ENOEXEC;
1123 #endif
1126 #ifdef CONFIG_MODVERSIONS
1127 /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
1128 static unsigned long maybe_relocated(unsigned long crc,
1129 const struct module *crc_owner)
1131 #ifdef ARCH_RELOCATES_KCRCTAB
1132 if (crc_owner == NULL)
1133 return crc - (unsigned long)reloc_start;
1134 #endif
1135 return crc;
1138 static int check_version(Elf_Shdr *sechdrs,
1139 unsigned int versindex,
1140 const char *symname,
1141 struct module *mod,
1142 const unsigned long *crc,
1143 const struct module *crc_owner)
1145 unsigned int i, num_versions;
1146 struct modversion_info *versions;
1148 /* Exporting module didn't supply crcs? OK, we're already tainted. */
1149 if (!crc)
1150 return 1;
1152 /* No versions at all? modprobe --force does this. */
1153 if (versindex == 0)
1154 return try_to_force_load(mod, symname) == 0;
1156 versions = (void *) sechdrs[versindex].sh_addr;
1157 num_versions = sechdrs[versindex].sh_size
1158 / sizeof(struct modversion_info);
1160 for (i = 0; i < num_versions; i++) {
1161 if (strcmp(versions[i].name, symname) != 0)
1162 continue;
1164 if (versions[i].crc == maybe_relocated(*crc, crc_owner))
1165 return 1;
1166 pr_debug("Found checksum %lX vs module %lX\n",
1167 maybe_relocated(*crc, crc_owner), versions[i].crc);
1168 goto bad_version;
1171 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
1172 return 0;
1174 bad_version:
1175 printk("%s: disagrees about version of symbol %s\n",
1176 mod->name, symname);
1177 return 0;
1180 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1181 unsigned int versindex,
1182 struct module *mod)
1184 const unsigned long *crc;
1186 /* Since this should be found in kernel (which can't be removed),
1187 * no locking is necessary. */
1188 if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
1189 &crc, true, false))
1190 BUG();
1191 return check_version(sechdrs, versindex,
1192 VMLINUX_SYMBOL_STR(module_layout), mod, crc,
1193 NULL);
1196 /* First part is kernel version, which we ignore if module has crcs. */
1197 static inline int same_magic(const char *amagic, const char *bmagic,
1198 bool has_crcs)
1200 if (has_crcs) {
1201 amagic += strcspn(amagic, " ");
1202 bmagic += strcspn(bmagic, " ");
1204 return strcmp(amagic, bmagic) == 0;
1206 #else
1207 static inline int check_version(Elf_Shdr *sechdrs,
1208 unsigned int versindex,
1209 const char *symname,
1210 struct module *mod,
1211 const unsigned long *crc,
1212 const struct module *crc_owner)
1214 return 1;
1217 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1218 unsigned int versindex,
1219 struct module *mod)
1221 return 1;
1224 static inline int same_magic(const char *amagic, const char *bmagic,
1225 bool has_crcs)
1227 return strcmp(amagic, bmagic) == 0;
1229 #endif /* CONFIG_MODVERSIONS */
1231 /* Resolve a symbol for this module. I.e. if we find one, record usage. */
1232 static const struct kernel_symbol *resolve_symbol(struct module *mod,
1233 const struct load_info *info,
1234 const char *name,
1235 char ownername[])
1237 struct module *owner;
1238 const struct kernel_symbol *sym;
1239 const unsigned long *crc;
1240 int err;
1242 mutex_lock(&module_mutex);
1243 sym = find_symbol(name, &owner, &crc,
1244 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1245 if (!sym)
1246 goto unlock;
1248 if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
1249 owner)) {
1250 sym = ERR_PTR(-EINVAL);
1251 goto getname;
1254 err = ref_module(mod, owner);
1255 if (err) {
1256 sym = ERR_PTR(err);
1257 goto getname;
1260 getname:
1261 /* We must make copy under the lock if we failed to get ref. */
1262 strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1263 unlock:
1264 mutex_unlock(&module_mutex);
1265 return sym;
static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}
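/*
 * Why the wait above: resolve_symbol() returns -EBUSY while the module that
 * exports the symbol is still initializing (MODULE_STATE_COMING, see
 * strong_try_module_get()), so we sleep on module_wq for up to 30 seconds
 * to give that module a chance to finish before giving up.
 */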
1287 * /sys/module/foo/sections stuff
1288 * J. Corbet <corbet@lwn.net>
1290 #ifdef CONFIG_SYSFS
1292 #ifdef CONFIG_KALLSYMS
1293 static inline bool sect_empty(const Elf_Shdr *sect)
1295 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1298 struct module_sect_attr
1300 struct module_attribute mattr;
1301 char *name;
1302 unsigned long address;
1305 struct module_sect_attrs
1307 struct attribute_group grp;
1308 unsigned int nsections;
1309 struct module_sect_attr attrs[0];
1312 static ssize_t module_sect_show(struct module_attribute *mattr,
1313 struct module_kobject *mk, char *buf)
1315 struct module_sect_attr *sattr =
1316 container_of(mattr, struct module_sect_attr, mattr);
1317 return sprintf(buf, "0x%pK\n", (void *)sattr->address);
1320 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1322 unsigned int section;
1324 for (section = 0; section < sect_attrs->nsections; section++)
1325 kfree(sect_attrs->attrs[section].name);
1326 kfree(sect_attrs);
1329 static void add_sect_attrs(struct module *mod, const struct load_info *info)
1331 unsigned int nloaded = 0, i, size[2];
1332 struct module_sect_attrs *sect_attrs;
1333 struct module_sect_attr *sattr;
1334 struct attribute **gattr;
1336 /* Count loaded sections and allocate structures */
1337 for (i = 0; i < info->hdr->e_shnum; i++)
1338 if (!sect_empty(&info->sechdrs[i]))
1339 nloaded++;
1340 size[0] = ALIGN(sizeof(*sect_attrs)
1341 + nloaded * sizeof(sect_attrs->attrs[0]),
1342 sizeof(sect_attrs->grp.attrs[0]));
1343 size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
1344 sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1345 if (sect_attrs == NULL)
1346 return;
1348 /* Setup section attributes. */
1349 sect_attrs->grp.name = "sections";
1350 sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
1352 sect_attrs->nsections = 0;
1353 sattr = &sect_attrs->attrs[0];
1354 gattr = &sect_attrs->grp.attrs[0];
1355 for (i = 0; i < info->hdr->e_shnum; i++) {
1356 Elf_Shdr *sec = &info->sechdrs[i];
1357 if (sect_empty(sec))
1358 continue;
1359 sattr->address = sec->sh_addr;
1360 sattr->name = kstrdup(info->secstrings + sec->sh_name,
1361 GFP_KERNEL);
1362 if (sattr->name == NULL)
1363 goto out;
1364 sect_attrs->nsections++;
1365 sysfs_attr_init(&sattr->mattr.attr);
1366 sattr->mattr.show = module_sect_show;
1367 sattr->mattr.store = NULL;
1368 sattr->mattr.attr.name = sattr->name;
1369 sattr->mattr.attr.mode = S_IRUGO;
1370 *(gattr++) = &(sattr++)->mattr.attr;
1372 *gattr = NULL;
1374 if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1375 goto out;
1377 mod->sect_attrs = sect_attrs;
1378 return;
1379 out:
1380 free_sect_attrs(sect_attrs);
1383 static void remove_sect_attrs(struct module *mod)
1385 if (mod->sect_attrs) {
1386 sysfs_remove_group(&mod->mkobj.kobj,
1387 &mod->sect_attrs->grp);
1388 /* We are positive that no one is using any sect attrs
1389 * at this point. Deallocate immediately. */
1390 free_sect_attrs(mod->sect_attrs);
1391 mod->sect_attrs = NULL;
1396 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1399 struct module_notes_attrs {
1400 struct kobject *dir;
1401 unsigned int notes;
1402 struct bin_attribute attrs[0];
1405 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1406 struct bin_attribute *bin_attr,
1407 char *buf, loff_t pos, size_t count)
1410 * The caller checked the pos and count against our size.
1412 memcpy(buf, bin_attr->private + pos, count);
1413 return count;
1416 static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1417 unsigned int i)
1419 if (notes_attrs->dir) {
1420 while (i-- > 0)
1421 sysfs_remove_bin_file(notes_attrs->dir,
1422 &notes_attrs->attrs[i]);
1423 kobject_put(notes_attrs->dir);
1425 kfree(notes_attrs);
1428 static void add_notes_attrs(struct module *mod, const struct load_info *info)
1430 unsigned int notes, loaded, i;
1431 struct module_notes_attrs *notes_attrs;
1432 struct bin_attribute *nattr;
1434 /* failed to create section attributes, so can't create notes */
1435 if (!mod->sect_attrs)
1436 return;
1438 /* Count notes sections and allocate structures. */
1439 notes = 0;
1440 for (i = 0; i < info->hdr->e_shnum; i++)
1441 if (!sect_empty(&info->sechdrs[i]) &&
1442 (info->sechdrs[i].sh_type == SHT_NOTE))
1443 ++notes;
1445 if (notes == 0)
1446 return;
1448 notes_attrs = kzalloc(sizeof(*notes_attrs)
1449 + notes * sizeof(notes_attrs->attrs[0]),
1450 GFP_KERNEL);
1451 if (notes_attrs == NULL)
1452 return;
1454 notes_attrs->notes = notes;
1455 nattr = &notes_attrs->attrs[0];
1456 for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1457 if (sect_empty(&info->sechdrs[i]))
1458 continue;
1459 if (info->sechdrs[i].sh_type == SHT_NOTE) {
1460 sysfs_bin_attr_init(nattr);
1461 nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
1462 nattr->attr.mode = S_IRUGO;
1463 nattr->size = info->sechdrs[i].sh_size;
1464 nattr->private = (void *) info->sechdrs[i].sh_addr;
1465 nattr->read = module_notes_read;
1466 ++nattr;
1468 ++loaded;
1471 notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1472 if (!notes_attrs->dir)
1473 goto out;
1475 for (i = 0; i < notes; ++i)
1476 if (sysfs_create_bin_file(notes_attrs->dir,
1477 &notes_attrs->attrs[i]))
1478 goto out;
1480 mod->notes_attrs = notes_attrs;
1481 return;
1483 out:
1484 free_notes_attrs(notes_attrs, i);
1487 static void remove_notes_attrs(struct module *mod)
1489 if (mod->notes_attrs)
1490 free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1493 #else
1495 static inline void add_sect_attrs(struct module *mod,
1496 const struct load_info *info)
1500 static inline void remove_sect_attrs(struct module *mod)
1504 static inline void add_notes_attrs(struct module *mod,
1505 const struct load_info *info)
1509 static inline void remove_notes_attrs(struct module *mod)
1512 #endif /* CONFIG_KALLSYMS */
1514 static void add_usage_links(struct module *mod)
1516 #ifdef CONFIG_MODULE_UNLOAD
1517 struct module_use *use;
1518 int nowarn;
1520 mutex_lock(&module_mutex);
1521 list_for_each_entry(use, &mod->target_list, target_list) {
1522 nowarn = sysfs_create_link(use->target->holders_dir,
1523 &mod->mkobj.kobj, mod->name);
1525 mutex_unlock(&module_mutex);
1526 #endif
1529 static void del_usage_links(struct module *mod)
1531 #ifdef CONFIG_MODULE_UNLOAD
1532 struct module_use *use;
1534 mutex_lock(&module_mutex);
1535 list_for_each_entry(use, &mod->target_list, target_list)
1536 sysfs_remove_link(use->target->holders_dir, mod->name);
1537 mutex_unlock(&module_mutex);
1538 #endif
1541 static int module_add_modinfo_attrs(struct module *mod)
1543 struct module_attribute *attr;
1544 struct module_attribute *temp_attr;
1545 int error = 0;
1546 int i;
1548 mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1549 (ARRAY_SIZE(modinfo_attrs) + 1)),
1550 GFP_KERNEL);
1551 if (!mod->modinfo_attrs)
1552 return -ENOMEM;
1554 temp_attr = mod->modinfo_attrs;
1555 for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
1556 if (!attr->test ||
1557 (attr->test && attr->test(mod))) {
1558 memcpy(temp_attr, attr, sizeof(*temp_attr));
1559 sysfs_attr_init(&temp_attr->attr);
1560 error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr);
1561 ++temp_attr;
1564 return error;
1567 static void module_remove_modinfo_attrs(struct module *mod)
1569 struct module_attribute *attr;
1570 int i;
1572 for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1573 /* pick a field to test for end of list */
1574 if (!attr->attr.name)
1575 break;
1576 sysfs_remove_file(&mod->mkobj.kobj,&attr->attr);
1577 if (attr->free)
1578 attr->free(mod);
1580 kfree(mod->modinfo_attrs);
1583 static void mod_kobject_put(struct module *mod)
1585 DECLARE_COMPLETION_ONSTACK(c);
1586 mod->mkobj.kobj_completion = &c;
1587 kobject_put(&mod->mkobj.kobj);
1588 wait_for_completion(&c);
1591 static int mod_sysfs_init(struct module *mod)
1593 int err;
1594 struct kobject *kobj;
1596 if (!module_sysfs_initialized) {
1597 pr_err("%s: module sysfs not initialized\n", mod->name);
1598 err = -EINVAL;
1599 goto out;
1602 kobj = kset_find_obj(module_kset, mod->name);
1603 if (kobj) {
1604 pr_err("%s: module is already loaded\n", mod->name);
1605 kobject_put(kobj);
1606 err = -EINVAL;
1607 goto out;
1610 mod->mkobj.mod = mod;
1612 memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1613 mod->mkobj.kobj.kset = module_kset;
1614 err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1615 "%s", mod->name);
1616 if (err)
1617 mod_kobject_put(mod);
1619 /* delay uevent until full sysfs population */
1620 out:
1621 return err;
1624 static int mod_sysfs_setup(struct module *mod,
1625 const struct load_info *info,
1626 struct kernel_param *kparam,
1627 unsigned int num_params)
1629 int err;
1631 err = mod_sysfs_init(mod);
1632 if (err)
1633 goto out;
1635 mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1636 if (!mod->holders_dir) {
1637 err = -ENOMEM;
1638 goto out_unreg;
1641 err = module_param_sysfs_setup(mod, kparam, num_params);
1642 if (err)
1643 goto out_unreg_holders;
1645 err = module_add_modinfo_attrs(mod);
1646 if (err)
1647 goto out_unreg_param;
1649 add_usage_links(mod);
1650 add_sect_attrs(mod, info);
1651 add_notes_attrs(mod, info);
1653 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1654 return 0;
1656 out_unreg_param:
1657 module_param_sysfs_remove(mod);
1658 out_unreg_holders:
1659 kobject_put(mod->holders_dir);
1660 out_unreg:
1661 mod_kobject_put(mod);
1662 out:
1663 return err;
1666 static void mod_sysfs_fini(struct module *mod)
1668 remove_notes_attrs(mod);
1669 remove_sect_attrs(mod);
1670 mod_kobject_put(mod);
1673 #else /* !CONFIG_SYSFS */
1675 static int mod_sysfs_setup(struct module *mod,
1676 const struct load_info *info,
1677 struct kernel_param *kparam,
1678 unsigned int num_params)
1680 return 0;
1683 static void mod_sysfs_fini(struct module *mod)
1687 static void module_remove_modinfo_attrs(struct module *mod)
1691 static void del_usage_links(struct module *mod)
1695 #endif /* CONFIG_SYSFS */
1697 static void mod_sysfs_teardown(struct module *mod)
1699 del_usage_links(mod);
1700 module_remove_modinfo_attrs(mod);
1701 module_param_sysfs_remove(mod);
1702 kobject_put(mod->mkobj.drivers_dir);
1703 kobject_put(mod->holders_dir);
1704 mod_sysfs_fini(mod);
/*
 * unlink the module while the whole machine is stopped with interrupts off
 * - this defends against kallsyms not taking locks
 */
static int __unlink_module(void *_mod)
{
	struct module *mod = _mod;
	list_del(&mod->list);
	module_bug_cleanup(mod);
	return 0;
}
1719 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
1721 * LKM RO/NX protection: protect module's text/ro-data
1722 * from modification and any data from execution.
1724 void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
1726 unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
1727 unsigned long end_pfn = PFN_DOWN((unsigned long)end);
1729 if (end_pfn > begin_pfn)
1730 set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
1733 static void set_section_ro_nx(void *base,
1734 unsigned long text_size,
1735 unsigned long ro_size,
1736 unsigned long total_size)
1738 /* begin and end PFNs of the current subsection */
1739 unsigned long begin_pfn;
1740 unsigned long end_pfn;
1743 * Set RO for module text and RO-data:
1744 * - Always protect first page.
1745 * - Do not protect last partial page.
1747 if (ro_size > 0)
1748 set_page_attributes(base, base + ro_size, set_memory_ro);
1751 * Set NX permissions for module data:
1752 * - Do not protect first partial page.
1753 * - Always protect last page.
1755 if (total_size > text_size) {
1756 begin_pfn = PFN_UP((unsigned long)base + text_size);
1757 end_pfn = PFN_UP((unsigned long)base + total_size);
1758 if (end_pfn > begin_pfn)
1759 set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
1763 static void unset_module_core_ro_nx(struct module *mod)
1765 set_page_attributes(mod->module_core + mod->core_text_size,
1766 mod->module_core + mod->core_size,
1767 set_memory_x);
1768 set_page_attributes(mod->module_core,
1769 mod->module_core + mod->core_ro_size,
1770 set_memory_rw);
1773 static void unset_module_init_ro_nx(struct module *mod)
1775 set_page_attributes(mod->module_init + mod->init_text_size,
1776 mod->module_init + mod->init_size,
1777 set_memory_x);
1778 set_page_attributes(mod->module_init,
1779 mod->module_init + mod->init_ro_size,
1780 set_memory_rw);
1783 /* Iterate through all modules and set each module's text as RW */
1784 void set_all_modules_text_rw(void)
1786 struct module *mod;
1788 mutex_lock(&module_mutex);
1789 list_for_each_entry_rcu(mod, &modules, list) {
1790 if (mod->state == MODULE_STATE_UNFORMED)
1791 continue;
1792 if ((mod->module_core) && (mod->core_text_size)) {
1793 set_page_attributes(mod->module_core,
1794 mod->module_core + mod->core_text_size,
1795 set_memory_rw);
1797 if ((mod->module_init) && (mod->init_text_size)) {
1798 set_page_attributes(mod->module_init,
1799 mod->module_init + mod->init_text_size,
1800 set_memory_rw);
1803 mutex_unlock(&module_mutex);
1806 /* Iterate through all modules and set each module's text as RO */
1807 void set_all_modules_text_ro(void)
1809 struct module *mod;
1811 mutex_lock(&module_mutex);
1812 list_for_each_entry_rcu(mod, &modules, list) {
1813 if (mod->state == MODULE_STATE_UNFORMED)
1814 continue;
1815 if ((mod->module_core) && (mod->core_text_size)) {
1816 set_page_attributes(mod->module_core,
1817 mod->module_core + mod->core_text_size,
1818 set_memory_ro);
1820 if ((mod->module_init) && (mod->init_text_size)) {
1821 set_page_attributes(mod->module_init,
1822 mod->module_init + mod->init_text_size,
1823 set_memory_ro);
1826 mutex_unlock(&module_mutex);
1828 #else
1829 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
1830 static void unset_module_core_ro_nx(struct module *mod) { }
1831 static void unset_module_init_ro_nx(struct module *mod) { }
1832 #endif
1834 void __weak module_free(struct module *mod, void *module_region)
1836 vfree(module_region);
1839 void __weak module_arch_cleanup(struct module *mod)
1843 /* Free a module, remove from lists, etc. */
1844 static void free_module(struct module *mod)
1846 trace_module_free(mod);
1848 mod_sysfs_teardown(mod);
	/* We leave it in list to prevent duplicate loads, but make sure
	 * that no one uses it while it's being deconstructed. */
1852 mutex_lock(&module_mutex);
1853 mod->state = MODULE_STATE_UNFORMED;
1854 mutex_unlock(&module_mutex);
1856 /* Remove dynamic debug info */
1857 ddebug_remove_module(mod->name);
1859 /* Arch-specific cleanup. */
1860 module_arch_cleanup(mod);
1862 /* Module unload stuff */
1863 module_unload_free(mod);
1865 /* Free any allocated parameters. */
1866 destroy_params(mod->kp, mod->num_kp);
1868 /* Now we can delete it from the lists */
1869 mutex_lock(&module_mutex);
1870 stop_machine(__unlink_module, mod, NULL);
1871 mutex_unlock(&module_mutex);
1873 /* This may be NULL, but that's OK */
1874 unset_module_init_ro_nx(mod);
1875 module_free(mod, mod->module_init);
1876 kfree(mod->args);
1877 percpu_modfree(mod);
1879 /* Free lock-classes: */
1880 lockdep_free_key_range(mod->module_core, mod->core_size);
1882 /* Finally, free the core (containing the module structure) */
1883 unset_module_core_ro_nx(mod);
1884 module_free(mod, mod->module_core);
1886 #ifdef CONFIG_MPU
1887 update_protections(current->mm);
1888 #endif
1891 void *__symbol_get(const char *symbol)
1893 struct module *owner;
1894 const struct kernel_symbol *sym;
1896 preempt_disable();
1897 sym = find_symbol(symbol, &owner, NULL, true, true);
1898 if (sym && strong_try_module_get(owner))
1899 sym = NULL;
1900 preempt_enable();
1902 return sym ? (void *)sym->value : NULL;
1904 EXPORT_SYMBOL_GPL(__symbol_get);
1907 * Ensure that an exported symbol [global namespace] does not already exist
1908 * in the kernel or in some other module's exported symbol table.
1910 * You must hold the module_mutex.
1912 static int verify_export_symbols(struct module *mod)
1914 unsigned int i;
1915 struct module *owner;
1916 const struct kernel_symbol *s;
1917 struct {
1918 const struct kernel_symbol *sym;
1919 unsigned int num;
1920 } arr[] = {
1921 { mod->syms, mod->num_syms },
1922 { mod->gpl_syms, mod->num_gpl_syms },
1923 { mod->gpl_future_syms, mod->num_gpl_future_syms },
1924 #ifdef CONFIG_UNUSED_SYMBOLS
1925 { mod->unused_syms, mod->num_unused_syms },
1926 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
1927 #endif
1930 for (i = 0; i < ARRAY_SIZE(arr); i++) {
1931 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
1932 if (find_symbol(s->name, &owner, NULL, true, false)) {
1933 pr_err("%s: exports duplicate symbol %s"
1934 " (owned by %s)\n",
1935 mod->name, s->name, module_name(owner));
1936 return -ENOEXEC;
1940 return 0;
1943 /* Change all symbols so that st_value encodes the pointer directly. */
1944 static int simplify_symbols(struct module *mod, const struct load_info *info)
1946 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
1947 Elf_Sym *sym = (void *)symsec->sh_addr;
1948 unsigned long secbase;
1949 unsigned int i;
1950 int ret = 0;
1951 const struct kernel_symbol *ksym;
1953 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
1954 const char *name = info->strtab + sym[i].st_name;
1956 switch (sym[i].st_shndx) {
1957 case SHN_COMMON:
1958 /* Ignore common symbols */
1959 if (!strncmp(name, "__gnu_lto", 9))
1960 break;
1962 /* We compiled with -fno-common. These are not
1963 supposed to happen. */
1964 pr_debug("Common symbol: %s\n", name);
1965 printk("%s: please compile with -fno-common\n",
1966 mod->name);
1967 ret = -ENOEXEC;
1968 break;
1970 case SHN_ABS:
1971 /* Don't need to do anything */
1972 pr_debug("Absolute symbol: 0x%08lx\n",
1973 (long)sym[i].st_value);
1974 break;
1976 case SHN_UNDEF:
1977 ksym = resolve_symbol_wait(mod, info, name);
1978 /* Ok if resolved. */
1979 if (ksym && !IS_ERR(ksym)) {
1980 sym[i].st_value = ksym->value;
1981 break;
1984 /* Ok if weak. */
1985 if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
1986 break;
1988 pr_warn("%s: Unknown symbol %s (err %li)\n",
1989 mod->name, name, PTR_ERR(ksym));
1990 ret = PTR_ERR(ksym) ?: -ENOENT;
1991 break;
1993 default:
1994 /* Divert to percpu allocation if a percpu var. */
1995 if (sym[i].st_shndx == info->index.pcpu)
1996 secbase = (unsigned long)mod_percpu(mod);
1997 else
1998 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
1999 sym[i].st_value += secbase;
2000 break;
2004 return ret;
2007 static int apply_relocations(struct module *mod, const struct load_info *info)
2009 unsigned int i;
2010 int err = 0;
2012 /* Now do relocations. */
2013 for (i = 1; i < info->hdr->e_shnum; i++) {
2014 unsigned int infosec = info->sechdrs[i].sh_info;
2016 /* Not a valid relocation section? */
2017 if (infosec >= info->hdr->e_shnum)
2018 continue;
2020 /* Don't bother with non-allocated sections */
2021 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2022 continue;
2024 if (info->sechdrs[i].sh_type == SHT_REL)
2025 err = apply_relocate(info->sechdrs, info->strtab,
2026 info->index.sym, i, mod);
2027 else if (info->sechdrs[i].sh_type == SHT_RELA)
2028 err = apply_relocate_add(info->sechdrs, info->strtab,
2029 info->index.sym, i, mod);
2030 if (err < 0)
2031 break;
2033 return err;
2036 /* Additional bytes needed by arch in front of individual sections */
2037 unsigned int __weak arch_mod_section_prepend(struct module *mod,
2038 unsigned int section)
2040 /* default implementation just returns zero */
2041 return 0;
2044 /* Update size with this section: return offset. */
2045 static long get_offset(struct module *mod, unsigned int *size,
2046 Elf_Shdr *sechdr, unsigned int section)
2048 long ret;
2050 *size += arch_mod_section_prepend(mod, section);
2051 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2052 *size = ret + sechdr->sh_size;
2053 return ret;
2056 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2057 might -- code, read-only data, read-write data, small data. Tally
2058 sizes, and place the offsets into sh_entsize fields: high bit means it
2059 belongs in init. */
2060 static void layout_sections(struct module *mod, struct load_info *info)
2062 static unsigned long const masks[][2] = {
2063 /* NOTE: all executable code must be the first section
2064 * in this array; otherwise modify the text_size
2065 * finder in the two loops below */
2066 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2067 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2068 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2069 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2071 unsigned int m, i;
2073 for (i = 0; i < info->hdr->e_shnum; i++)
2074 info->sechdrs[i].sh_entsize = ~0UL;
2076 pr_debug("Core section allocation order:\n");
2077 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2078 for (i = 0; i < info->hdr->e_shnum; ++i) {
2079 Elf_Shdr *s = &info->sechdrs[i];
2080 const char *sname = info->secstrings + s->sh_name;
2082 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2083 || (s->sh_flags & masks[m][1])
2084 || s->sh_entsize != ~0UL
2085 || strstarts(sname, ".init"))
2086 continue;
2087 s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
2088 pr_debug("\t%s\n", sname);
2090 switch (m) {
2091 case 0: /* executable */
2092 mod->core_size = debug_align(mod->core_size);
2093 mod->core_text_size = mod->core_size;
2094 break;
2095 case 1: /* RO: text and ro-data */
2096 mod->core_size = debug_align(mod->core_size);
2097 mod->core_ro_size = mod->core_size;
2098 break;
2099 case 3: /* whole core */
2100 mod->core_size = debug_align(mod->core_size);
2101 break;
2105 pr_debug("Init section allocation order:\n");
2106 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2107 for (i = 0; i < info->hdr->e_shnum; ++i) {
2108 Elf_Shdr *s = &info->sechdrs[i];
2109 const char *sname = info->secstrings + s->sh_name;
2111 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2112 || (s->sh_flags & masks[m][1])
2113 || s->sh_entsize != ~0UL
2114 || !strstarts(sname, ".init"))
2115 continue;
2116 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
2117 | INIT_OFFSET_MASK);
2118 pr_debug("\t%s\n", sname);
2120 switch (m) {
2121 case 0: /* executable */
2122 mod->init_size = debug_align(mod->init_size);
2123 mod->init_text_size = mod->init_size;
2124 break;
2125 case 1: /* RO: text and ro-data */
2126 mod->init_size = debug_align(mod->init_size);
2127 mod->init_ro_size = mod->init_size;
2128 break;
		case 3: /* whole init */
			mod->init_size = debug_align(mod->init_size);
			break;
		}
	}
}
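/*
 * How the masks[] table in layout_sections() is consumed (descriptive
 * note): pass 0 collects executable sections and records the _text_size
 * marks, pass 1 adds the remaining read-only data and records the _ro_size
 * marks, pass 2 adds writable data, and pass 3 sweeps up ARCH_SHF_SMALL
 * sections; each section's offset is stashed in sh_entsize, with
 * INIT_OFFSET_MASK set for sections destined for the init region.
 */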
2136 static void set_license(struct module *mod, const char *license)
2138 if (!license)
2139 license = "unspecified";
2141 if (!license_is_gpl_compatible(license)) {
2142 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2143 pr_warn("%s: module license '%s' taints kernel.\n",
2144 mod->name, license);
2145 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2146 LOCKDEP_NOW_UNRELIABLE);
2150 /* Parse tag=value strings from .modinfo section */
2151 static char *next_string(char *string, unsigned long *secsize)
2153 /* Skip non-zero chars */
2154 while (string[0]) {
2155 string++;
2156 if ((*secsize)-- <= 1)
2157 return NULL;
2160 /* Skip any zero padding. */
2161 while (!string[0]) {
2162 string++;
2163 if ((*secsize)-- <= 1)
2164 return NULL;
2166 return string;
2169 static char *get_modinfo(struct load_info *info, const char *tag)
2171 char *p;
2172 unsigned int taglen = strlen(tag);
2173 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2174 unsigned long size = infosec->sh_size;
2176 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2177 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2178 return p + taglen + 1;
2180 return NULL;
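/*
 * For illustration, .modinfo is a series of NUL-terminated "tag=value"
 * strings, e.g. (values made up):
 *
 *	license=GPL\0author=Jane Example\0vermagic=...\0
 *
 * so get_modinfo(info, "license") returns a pointer to "GPL".
 */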
2183 static void setup_modinfo(struct module *mod, struct load_info *info)
2185 struct module_attribute *attr;
2186 int i;
2188 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2189 if (attr->setup)
2190 attr->setup(mod, get_modinfo(info, attr->attr.name));
2194 static void free_modinfo(struct module *mod)
2196 struct module_attribute *attr;
2197 int i;
2199 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2200 if (attr->free)
2201 attr->free(mod);
2205 #ifdef CONFIG_KALLSYMS
2207 /* lookup symbol in given range of kernel_symbols */
2208 static const struct kernel_symbol *lookup_symbol(const char *name,
2209 const struct kernel_symbol *start,
2210 const struct kernel_symbol *stop)
2212 return bsearch(name, start, stop - start,
2213 sizeof(struct kernel_symbol), cmp_name);
2216 static int is_exported(const char *name, unsigned long value,
2217 const struct module *mod)
2219 const struct kernel_symbol *ks;
2220 if (!mod)
2221 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2222 else
2223 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2224 return ks != NULL && ks->value == value;
2227 /* As per nm */
2228 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2230 const Elf_Shdr *sechdrs = info->sechdrs;
2232 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2233 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2234 return 'v';
2235 else
2236 return 'w';
2238 if (sym->st_shndx == SHN_UNDEF)
2239 return 'U';
2240 if (sym->st_shndx == SHN_ABS)
2241 return 'a';
2242 if (sym->st_shndx >= SHN_LORESERVE)
2243 return '?';
2244 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2245 return 't';
2246 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2247 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2248 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2249 return 'r';
2250 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2251 return 'g';
2252 else
2253 return 'd';
2255 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2256 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2257 return 's';
2258 else
2259 return 'b';
2261 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2262 ".debug")) {
2263 return 'n';
2265 return '?';
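/*
 * Example of the mapping above, mirroring nm(1) output: a symbol in
 * .text yields 't', one in .rodata 'r', one in .data 'd', one in .bss
 * 'b', an undefined reference 'U' and a weak function 'w'.
 */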
2268 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2269 unsigned int shnum)
2271 const Elf_Shdr *sec;
2273 if (src->st_shndx == SHN_UNDEF
2274 || src->st_shndx >= shnum
2275 || !src->st_name)
2276 return false;
2278 sec = sechdrs + src->st_shndx;
2279 if (!(sec->sh_flags & SHF_ALLOC)
2280 #ifndef CONFIG_KALLSYMS_ALL
2281 || !(sec->sh_flags & SHF_EXECINSTR)
2282 #endif
2283 || (sec->sh_entsize & INIT_OFFSET_MASK))
2284 return false;
2286 return true;
2290 * We only allocate and copy the strings needed by the parts of symtab
2291 * we keep. This is simple, but has the effect of making multiple
2292 * copies of duplicates. We could be more sophisticated, see
2293 * linux-kernel thread starting with
2294 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2296 static void layout_symtab(struct module *mod, struct load_info *info)
2298 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2299 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2300 const Elf_Sym *src;
2301 unsigned int i, nsrc, ndst, strtab_size = 0;
2303 /* Put symbol section at end of init part of module. */
2304 symsect->sh_flags |= SHF_ALLOC;
2305 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2306 info->index.sym) | INIT_OFFSET_MASK;
2307 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2309 src = (void *)info->hdr + symsect->sh_offset;
2310 nsrc = symsect->sh_size / sizeof(*src);
2312 /* Compute total space required for the core symbols' strtab. */
2313 for (ndst = i = 0; i < nsrc; i++) {
2314 if (i == 0 ||
2315 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2316 strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2317 ndst++;
2321 /* Append room for core symbols at end of core part. */
2322 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2323 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2324 mod->core_size += strtab_size;
2326 /* Put string table section at end of init part of module. */
2327 strsect->sh_flags |= SHF_ALLOC;
2328 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2329 info->index.str) | INIT_OFFSET_MASK;
2330 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2332 /* We'll tack temporary mod_kallsyms on the end. */
2333 mod->init_size = ALIGN(mod->init_size,
2334 __alignof__(struct mod_kallsyms));
2335 info->mod_kallsyms_init_off = mod->init_size;
2336 mod->init_size += sizeof(struct mod_kallsyms);
2337 mod->init_size = debug_align(mod->init_size);
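/*
 * Sketch of the resulting layout (illustrative): the init area ends up
 * as ...[init sections][full symtab][full strtab][struct mod_kallsyms],
 * while space for the cut-down copies is reserved at the end of the
 * core area: ...[core sections][core symtab][core strtab].
 * add_kallsyms() below fills in both.
 */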
2341 * We use the full symtab and strtab which layout_symtab arranged to
2342 * be appended to the init section. Later we switch to the cut-down
2343 * core-only ones.
2345 static void add_kallsyms(struct module *mod, const struct load_info *info)
2347 unsigned int i, ndst;
2348 const Elf_Sym *src;
2349 Elf_Sym *dst;
2350 char *s;
2351 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2353 /* Set up to point into init section. */
2354 mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off;
2356 mod->kallsyms->symtab = (void *)symsec->sh_addr;
2357 mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2358 /* Make sure we get permanent strtab: don't use info->strtab. */
2359 mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2361 /* Set types up while we still have access to sections. */
2362 for (i = 0; i < mod->kallsyms->num_symtab; i++)
2363 mod->kallsyms->symtab[i].st_info
2364 = elf_type(&mod->kallsyms->symtab[i], info);
2366 /* Now populate the cut down core kallsyms for after init. */
2367 mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs;
2368 mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs;
2369 src = mod->kallsyms->symtab;
2370 for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2371 if (i == 0 ||
2372 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2373 dst[ndst] = src[i];
2374 dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
2375 s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
2376 KSYM_NAME_LEN) + 1;
2379 mod->core_kallsyms.num_symtab = ndst;
2381 #else
2382 static inline void layout_symtab(struct module *mod, struct load_info *info)
2386 static void add_kallsyms(struct module *mod, const struct load_info *info)
2389 #endif /* CONFIG_KALLSYMS */
2391 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2393 if (!debug)
2394 return;
2395 #ifdef CONFIG_DYNAMIC_DEBUG
2396 if (ddebug_add_module(debug, num, debug->modname))
2397 pr_err("dynamic debug error adding module: %s\n",
2398 debug->modname);
2399 #endif
2402 static void dynamic_debug_remove(struct _ddebug *debug)
2404 if (debug)
2405 ddebug_remove_module(debug->modname);
2408 void * __weak module_alloc(unsigned long size)
2410 return vmalloc_exec(size);
2413 static void *module_alloc_update_bounds(unsigned long size)
2415 void *ret = module_alloc(size);
2417 if (ret) {
2418 mutex_lock(&module_mutex);
2419 /* Update module bounds. */
2420 if ((unsigned long)ret < module_addr_min)
2421 module_addr_min = (unsigned long)ret;
2422 if ((unsigned long)ret + size > module_addr_max)
2423 module_addr_max = (unsigned long)ret + size;
2424 mutex_unlock(&module_mutex);
2426 return ret;
2429 #ifdef CONFIG_DEBUG_KMEMLEAK
2430 static void kmemleak_load_module(const struct module *mod,
2431 const struct load_info *info)
2433 unsigned int i;
2435 /* only scan the sections containing data */
2436 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2438 for (i = 1; i < info->hdr->e_shnum; i++) {
2439 /* Scan all writable sections that are not executable */
2440 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2441 !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2442 (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2443 continue;
2445 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2446 info->sechdrs[i].sh_size, GFP_KERNEL);
2449 #else
2450 static inline void kmemleak_load_module(const struct module *mod,
2451 const struct load_info *info)
2454 #endif
2456 #ifdef CONFIG_MODULE_SIG
2457 static int module_sig_check(struct load_info *info)
2459 int err = -ENOKEY;
2460 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2461 const void *mod = info->hdr;
2463 if (info->len > markerlen &&
2464 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2465 /* We truncate the module to discard the signature */
2466 info->len -= markerlen;
2467 err = mod_verify_sig(mod, &info->len);
2470 if (!err) {
2471 info->sig_ok = true;
2472 return 0;
2475 /* Not having a signature is only an error if we're strict. */
2476 if (err == -ENOKEY && !sig_enforce)
2477 err = 0;
2479 return err;
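/*
 * Rough sketch of a signed module image (for orientation only):
 * [ELF module][signature data][struct module_signature][MODULE_SIG_STRING].
 * Stripping the trailing marker above is therefore enough for
 * mod_verify_sig() to locate and check the signature.
 */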
2481 #else /* !CONFIG_MODULE_SIG */
2482 static int module_sig_check(struct load_info *info)
2484 return 0;
2486 #endif /* !CONFIG_MODULE_SIG */
2488 /* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2489 static int elf_header_check(struct load_info *info)
2491 if (info->len < sizeof(*(info->hdr)))
2492 return -ENOEXEC;
2494 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2495 || info->hdr->e_type != ET_REL
2496 || !elf_check_arch(info->hdr)
2497 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2498 return -ENOEXEC;
2500 if (info->hdr->e_shoff >= info->len
2501 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2502 info->len - info->hdr->e_shoff))
2503 return -ENOEXEC;
2505 return 0;
2508 /* Sets info->hdr and info->len. */
2509 static int copy_module_from_user(const void __user *umod, unsigned long len,
2510 struct load_info *info)
2512 int err;
2514 info->len = len;
2515 if (info->len < sizeof(*(info->hdr)))
2516 return -ENOEXEC;
2518 err = security_kernel_module_from_file(NULL);
2519 if (err)
2520 return err;
2522 /* Suck in entire file: we'll want most of it. */
2523 info->hdr = vmalloc(info->len);
2524 if (!info->hdr)
2525 return -ENOMEM;
2527 if (copy_from_user(info->hdr, umod, info->len) != 0) {
2528 vfree(info->hdr);
2529 return -EFAULT;
2532 return 0;
2535 /* Sets info->hdr and info->len. */
2536 static int copy_module_from_fd(int fd, struct load_info *info)
2538 struct fd f = fdget(fd);
2539 int err;
2540 struct kstat stat;
2541 loff_t pos;
2542 ssize_t bytes = 0;
2544 if (!f.file)
2545 return -ENOEXEC;
2547 err = security_kernel_module_from_file(f.file);
2548 if (err)
2549 goto out;
2551 err = vfs_getattr(&f.file->f_path, &stat);
2552 if (err)
2553 goto out;
2555 if (stat.size > INT_MAX) {
2556 err = -EFBIG;
2557 goto out;
2560 /* Don't hand 0 to vmalloc, it whines. */
2561 if (stat.size == 0) {
2562 err = -EINVAL;
2563 goto out;
2566 info->hdr = vmalloc(stat.size);
2567 if (!info->hdr) {
2568 err = -ENOMEM;
2569 goto out;
2572 pos = 0;
2573 while (pos < stat.size) {
2574 bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
2575 stat.size - pos);
2576 if (bytes < 0) {
2577 vfree(info->hdr);
2578 err = bytes;
2579 goto out;
2581 if (bytes == 0)
2582 break;
2583 pos += bytes;
2585 info->len = pos;
2587 out:
2588 fdput(f);
2589 return err;
2592 static void free_copy(struct load_info *info)
2594 vfree(info->hdr);
2597 static int rewrite_section_headers(struct load_info *info, int flags)
2599 unsigned int i;
2601 /* This should always be true, but let's be sure. */
2602 info->sechdrs[0].sh_addr = 0;
2604 for (i = 1; i < info->hdr->e_shnum; i++) {
2605 Elf_Shdr *shdr = &info->sechdrs[i];
2606 if (shdr->sh_type != SHT_NOBITS
2607 && info->len < shdr->sh_offset + shdr->sh_size) {
2608 pr_err("Module len %lu truncated\n", info->len);
2609 return -ENOEXEC;
2612 /* Mark all sections sh_addr with their address in the
2613 temporary image. */
2614 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2616 #ifndef CONFIG_MODULE_UNLOAD
2617 /* Don't load .exit sections */
2618 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2619 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2620 #endif
2623 /* Track but don't keep modinfo and version sections. */
2624 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2625 info->index.vers = 0; /* Pretend no __versions section! */
2626 else
2627 info->index.vers = find_sec(info, "__versions");
2628 info->index.info = find_sec(info, ".modinfo");
2629 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2630 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2631 return 0;
2635 * Set up our basic convenience variables (pointers to section headers,
2636 * search for module section index etc), and do some basic section
2637 * verification.
2639 * Return the temporary module pointer (we'll replace it with the final
2640 * one when we move the module sections around).
2642 static struct module *setup_load_info(struct load_info *info, int flags)
2644 unsigned int i;
2645 int err;
2646 struct module *mod;
2648 /* Set up the convenience variables */
2649 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2650 info->secstrings = (void *)info->hdr
2651 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2653 err = rewrite_section_headers(info, flags);
2654 if (err)
2655 return ERR_PTR(err);
2657 /* Find internal symbols and strings. */
2658 for (i = 1; i < info->hdr->e_shnum; i++) {
2659 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2660 info->index.sym = i;
2661 info->index.str = info->sechdrs[i].sh_link;
2662 info->strtab = (char *)info->hdr
2663 + info->sechdrs[info->index.str].sh_offset;
2664 break;
2668 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2669 if (!info->index.mod) {
2670 pr_warn("No module found in object\n");
2671 return ERR_PTR(-ENOEXEC);
2673 /* This is temporary: point mod into copy of data. */
2674 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2676 if (info->index.sym == 0) {
2677 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
2678 return ERR_PTR(-ENOEXEC);
2681 info->index.pcpu = find_pcpusec(info);
2683 /* Check module struct version now, before we try to use module. */
2684 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2685 return ERR_PTR(-ENOEXEC);
2687 return mod;
2690 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2692 const char *modmagic = get_modinfo(info, "vermagic");
2693 int err;
2695 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2696 modmagic = NULL;
2698 /* This is allowed: modprobe --force will invalidate it. */
2699 if (!modmagic) {
2700 err = try_to_force_load(mod, "bad vermagic");
2701 if (err)
2702 return err;
2703 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2704 pr_err("%s: version magic '%s' should be '%s'\n",
2705 mod->name, modmagic, vermagic);
2706 return -ENOEXEC;
2709 if (!get_modinfo(info, "intree"))
2710 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2712 if (get_modinfo(info, "staging")) {
2713 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2714 pr_warn("%s: module is from the staging directory, the quality "
2715 "is unknown, you have been warned.\n", mod->name);
2718 /* Set up license info based on the info section */
2719 set_license(mod, get_modinfo(info, "license"));
2721 return 0;
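/*
 * Illustrative vermagic string (exact contents depend on the build):
 * something like "3.16.0 SMP mod_unload modversions ", i.e. the kernel
 * release plus feature flags that must match between kernel and module
 * unless the check is forced or ignored.
 */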
2724 static int find_module_sections(struct module *mod, struct load_info *info)
2726 mod->kp = section_objs(info, "__param",
2727 sizeof(*mod->kp), &mod->num_kp);
2728 mod->syms = section_objs(info, "__ksymtab",
2729 sizeof(*mod->syms), &mod->num_syms);
2730 mod->crcs = section_addr(info, "__kcrctab");
2731 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2732 sizeof(*mod->gpl_syms),
2733 &mod->num_gpl_syms);
2734 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2735 mod->gpl_future_syms = section_objs(info,
2736 "__ksymtab_gpl_future",
2737 sizeof(*mod->gpl_future_syms),
2738 &mod->num_gpl_future_syms);
2739 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2741 #ifdef CONFIG_UNUSED_SYMBOLS
2742 mod->unused_syms = section_objs(info, "__ksymtab_unused",
2743 sizeof(*mod->unused_syms),
2744 &mod->num_unused_syms);
2745 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2746 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2747 sizeof(*mod->unused_gpl_syms),
2748 &mod->num_unused_gpl_syms);
2749 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2750 #endif
2751 #ifdef CONFIG_CONSTRUCTORS
2752 mod->ctors = section_objs(info, ".ctors",
2753 sizeof(*mod->ctors), &mod->num_ctors);
2754 if (!mod->ctors)
2755 mod->ctors = section_objs(info, ".init_array",
2756 sizeof(*mod->ctors), &mod->num_ctors);
2757 else if (find_sec(info, ".init_array")) {
2759 * This shouldn't happen with the same compiler and binutils
2760 * building all parts of the module.
2762 printk(KERN_WARNING "%s: has both .ctors and .init_array.\n",
2763 mod->name);
2764 return -EINVAL;
2766 #endif
2768 #ifdef CONFIG_TRACEPOINTS
2769 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2770 sizeof(*mod->tracepoints_ptrs),
2771 &mod->num_tracepoints);
2772 #endif
2773 #ifdef HAVE_JUMP_LABEL
2774 mod->jump_entries = section_objs(info, "__jump_table",
2775 sizeof(*mod->jump_entries),
2776 &mod->num_jump_entries);
2777 #endif
2778 #ifdef CONFIG_EVENT_TRACING
2779 mod->trace_events = section_objs(info, "_ftrace_events",
2780 sizeof(*mod->trace_events),
2781 &mod->num_trace_events);
2782 #endif
2783 #ifdef CONFIG_TRACING
2784 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2785 sizeof(*mod->trace_bprintk_fmt_start),
2786 &mod->num_trace_bprintk_fmt);
2787 #endif
2788 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2789 /* sechdrs[0].sh_size is always zero */
2790 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2791 sizeof(*mod->ftrace_callsites),
2792 &mod->num_ftrace_callsites);
2793 #endif
2795 mod->extable = section_objs(info, "__ex_table",
2796 sizeof(*mod->extable), &mod->num_exentries);
2798 if (section_addr(info, "__obsparm"))
2799 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
2801 info->debug = section_objs(info, "__verbose",
2802 sizeof(*info->debug), &info->num_debug);
2804 return 0;
2807 static int move_module(struct module *mod, struct load_info *info)
2809 int i;
2810 void *ptr;
2812 /* Do the allocs. */
2813 ptr = module_alloc_update_bounds(mod->core_size);
2815 * The pointer to this block is stored in the module structure
2816 * which is inside the block. Just mark it as not being a
2817 * leak.
2819 kmemleak_not_leak(ptr);
2820 if (!ptr)
2821 return -ENOMEM;
2823 memset(ptr, 0, mod->core_size);
2824 mod->module_core = ptr;
2826 if (mod->init_size) {
2827 ptr = module_alloc_update_bounds(mod->init_size);
2829 * The pointer to this block is stored in the module structure
2830 * which is inside the block. This block doesn't need to be
2831 * scanned as it contains data and code that will be freed
2832 * after the module is initialized.
2834 kmemleak_ignore(ptr);
2835 if (!ptr) {
2836 module_free(mod, mod->module_core);
2837 return -ENOMEM;
2839 memset(ptr, 0, mod->init_size);
2840 mod->module_init = ptr;
2841 } else
2842 mod->module_init = NULL;
2844 /* Transfer each section which specifies SHF_ALLOC */
2845 pr_debug("final section addresses:\n");
2846 for (i = 0; i < info->hdr->e_shnum; i++) {
2847 void *dest;
2848 Elf_Shdr *shdr = &info->sechdrs[i];
2850 if (!(shdr->sh_flags & SHF_ALLOC))
2851 continue;
2853 if (shdr->sh_entsize & INIT_OFFSET_MASK)
2854 dest = mod->module_init
2855 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
2856 else
2857 dest = mod->module_core + shdr->sh_entsize;
2859 if (shdr->sh_type != SHT_NOBITS)
2860 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2861 /* Update sh_addr to point to copy in image. */
2862 shdr->sh_addr = (unsigned long)dest;
2863 pr_debug("\t0x%lx %s\n",
2864 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
2867 return 0;
2870 static int check_module_license_and_versions(struct module *mod)
2873 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2874 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2875 * using GPL-only symbols it needs.
2877 if (strcmp(mod->name, "ndiswrapper") == 0)
2878 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
2880 /* driverloader was caught wrongly pretending to be under GPL */
2881 if (strcmp(mod->name, "driverloader") == 0)
2882 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2883 LOCKDEP_NOW_UNRELIABLE);
2885 /* lve claims to be GPL but upstream won't provide source */
2886 if (strcmp(mod->name, "lve") == 0)
2887 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2888 LOCKDEP_NOW_UNRELIABLE);
2890 #ifdef CONFIG_MODVERSIONS
2891 if ((mod->num_syms && !mod->crcs)
2892 || (mod->num_gpl_syms && !mod->gpl_crcs)
2893 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2894 #ifdef CONFIG_UNUSED_SYMBOLS
2895 || (mod->num_unused_syms && !mod->unused_crcs)
2896 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2897 #endif
2899 return try_to_force_load(mod,
2900 "no versions for exported symbols");
2902 #endif
2903 return 0;
2906 static void flush_module_icache(const struct module *mod)
2908 mm_segment_t old_fs;
2910 /* flush the icache in correct context */
2911 old_fs = get_fs();
2912 set_fs(KERNEL_DS);
2915 * Flush the instruction cache, since we've played with text.
2916 * Do it before processing module parameters, so the module
2917 * can provide parameter accessor functions of its own.
2919 if (mod->module_init)
2920 flush_icache_range((unsigned long)mod->module_init,
2921 (unsigned long)mod->module_init
2922 + mod->init_size);
2923 flush_icache_range((unsigned long)mod->module_core,
2924 (unsigned long)mod->module_core + mod->core_size);
2926 set_fs(old_fs);
2929 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2930 Elf_Shdr *sechdrs,
2931 char *secstrings,
2932 struct module *mod)
2934 return 0;
2937 static struct module *layout_and_allocate(struct load_info *info, int flags)
2939 /* Module within temporary copy. */
2940 struct module *mod;
2941 int err;
2943 mod = setup_load_info(info, flags);
2944 if (IS_ERR(mod))
2945 return mod;
2947 err = check_modinfo(mod, info, flags);
2948 if (err)
2949 return ERR_PTR(err);
2951 /* Allow arches to frob section contents and sizes. */
2952 err = module_frob_arch_sections(info->hdr, info->sechdrs,
2953 info->secstrings, mod);
2954 if (err < 0)
2955 return ERR_PTR(err);
2957 /* We will do a special allocation for per-cpu sections later. */
2958 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
2960 /* Determine total sizes, and put offsets in sh_entsize. For now
2961 this is done generically; there don't appear to be any
2962 special cases for the architectures. */
2963 layout_sections(mod, info);
2964 layout_symtab(mod, info);
2966 /* Allocate and move to the final place */
2967 err = move_module(mod, info);
2968 if (err)
2969 return ERR_PTR(err);
2971 /* Module has been copied to its final place now: return it. */
2972 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2973 kmemleak_load_module(mod, info);
2974 return mod;
2977 /* mod is no longer valid after this! */
2978 static void module_deallocate(struct module *mod, struct load_info *info)
2980 percpu_modfree(mod);
2981 module_free(mod, mod->module_init);
2982 module_free(mod, mod->module_core);
2985 int __weak module_finalize(const Elf_Ehdr *hdr,
2986 const Elf_Shdr *sechdrs,
2987 struct module *me)
2989 return 0;
2992 static int post_relocation(struct module *mod, const struct load_info *info)
2994 /* Sort exception table now relocations are done. */
2995 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2997 /* Copy relocated percpu area over. */
2998 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
2999 info->sechdrs[info->index.pcpu].sh_size);
3001 /* Setup kallsyms-specific fields. */
3002 add_kallsyms(mod, info);
3004 /* Arch-specific module finalizing. */
3005 return module_finalize(info->hdr, info->sechdrs, mod);
3008 /* Is this module of this name done loading? No locks held. */
3009 static bool finished_loading(const char *name)
3011 struct module *mod;
3012 bool ret;
3014 mutex_lock(&module_mutex);
3015 mod = find_module_all(name, strlen(name), true);
3016 ret = !mod || mod->state == MODULE_STATE_LIVE
3017 || mod->state == MODULE_STATE_GOING;
3018 mutex_unlock(&module_mutex);
3020 return ret;
3023 /* Call module constructors. */
3024 static void do_mod_ctors(struct module *mod)
3026 #ifdef CONFIG_CONSTRUCTORS
3027 unsigned long i;
3029 for (i = 0; i < mod->num_ctors; i++)
3030 mod->ctors[i]();
3031 #endif
3034 /* This is where the real work happens */
3035 static int do_init_module(struct module *mod)
3037 int ret = 0;
3040 * We want to find out whether @mod uses async during init. Clear
3041 * PF_USED_ASYNC. async_schedule*() will set it.
3043 current->flags &= ~PF_USED_ASYNC;
3045 do_mod_ctors(mod);
3046 /* Start the module */
3047 if (mod->init != NULL)
3048 ret = do_one_initcall(mod->init);
3049 if (ret < 0) {
3050 /* Init routine failed: abort. Try to protect us from
3051 buggy refcounters. */
3052 mod->state = MODULE_STATE_GOING;
3053 synchronize_sched();
3054 module_put(mod);
3055 blocking_notifier_call_chain(&module_notify_list,
3056 MODULE_STATE_GOING, mod);
3057 free_module(mod);
3058 wake_up_all(&module_wq);
3059 return ret;
3061 if (ret > 0) {
3062 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3063 "follow 0/-E convention\n"
3064 "%s: loading module anyway...\n",
3065 __func__, mod->name, ret, __func__);
3066 dump_stack();
3069 /* Now it's a first class citizen! */
3070 mod->state = MODULE_STATE_LIVE;
3071 blocking_notifier_call_chain(&module_notify_list,
3072 MODULE_STATE_LIVE, mod);
3075 * We need to finish all async code before the module init sequence
3076 * is done. This has potential to deadlock. For example, a newly
3077 * detected block device can trigger request_module() of the
3078 * default iosched from async probing task. Once userland helper
3079 * reaches here, async_synchronize_full() will wait on the async
3080 * task waiting on request_module() and deadlock.
3082 * This deadlock is avoided by performing async_synchronize_full()
3083 * iff module init queued any async jobs. This isn't a full
3084 * solution as it will deadlock the same way if module loading from
3085 * async jobs nests more than once; however, due to the various
3086 * constraints, this hack seems to be the best option for now.
3087 * Please refer to the following thread for details.
3089 * http://thread.gmane.org/gmane.linux.kernel/1420814
3091 if (current->flags & PF_USED_ASYNC)
3092 async_synchronize_full();
3094 mutex_lock(&module_mutex);
3095 /* Drop initial reference. */
3096 module_put(mod);
3097 trim_init_extable(mod);
3098 #ifdef CONFIG_KALLSYMS
3099 /* Switch to core kallsyms now init is done: kallsyms may be walking! */
3100 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
3101 #endif
3102 unset_module_init_ro_nx(mod);
3103 module_free(mod, mod->module_init);
3104 mod->module_init = NULL;
3105 mod->init_size = 0;
3106 mod->init_ro_size = 0;
3107 mod->init_text_size = 0;
3108 mutex_unlock(&module_mutex);
3109 wake_up_all(&module_wq);
3111 return 0;
3114 static int may_init_module(void)
3116 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3117 return -EPERM;
3119 return 0;
3123 * We try to place it in the list now to make sure it's unique before
3124 * we dedicate too many resources; in particular, this avoids exhausting
3125 * temporary percpu memory.
3127 static int add_unformed_module(struct module *mod)
3129 int err;
3130 struct module *old;
3132 mod->state = MODULE_STATE_UNFORMED;
3134 again:
3135 mutex_lock(&module_mutex);
3136 old = find_module_all(mod->name, strlen(mod->name), true);
3137 if (old != NULL) {
3138 if (old->state == MODULE_STATE_COMING
3139 || old->state == MODULE_STATE_UNFORMED) {
3140 /* Wait in case it fails to load. */
3141 mutex_unlock(&module_mutex);
3142 err = wait_event_interruptible(module_wq,
3143 finished_loading(mod->name));
3144 if (err)
3145 goto out_unlocked;
3146 goto again;
3148 err = -EEXIST;
3149 goto out;
3151 list_add_rcu(&mod->list, &modules);
3152 err = 0;
3154 out:
3155 mutex_unlock(&module_mutex);
3156 out_unlocked:
3157 return err;
3160 static int complete_formation(struct module *mod, struct load_info *info)
3162 int err;
3164 mutex_lock(&module_mutex);
3166 /* Find duplicate symbols (must be called under lock). */
3167 err = verify_export_symbols(mod);
3168 if (err < 0)
3169 goto out;
3171 /* This relies on module_mutex for list integrity. */
3172 module_bug_finalize(info->hdr, info->sechdrs, mod);
3174 /* Set RO and NX regions for core */
3175 set_section_ro_nx(mod->module_core,
3176 mod->core_text_size,
3177 mod->core_ro_size,
3178 mod->core_size);
3180 /* Set RO and NX regions for init */
3181 set_section_ro_nx(mod->module_init,
3182 mod->init_text_size,
3183 mod->init_ro_size,
3184 mod->init_size);
3186 /* Mark state as coming so strong_try_module_get() ignores us,
3187 * but kallsyms etc. can see us. */
3188 mod->state = MODULE_STATE_COMING;
3189 mutex_unlock(&module_mutex);
3191 blocking_notifier_call_chain(&module_notify_list,
3192 MODULE_STATE_COMING, mod);
3193 return 0;
3195 out:
3196 mutex_unlock(&module_mutex);
3197 return err;
3200 static int unknown_module_param_cb(char *param, char *val, const char *modname)
3202 /* Check for magic 'dyndbg' arg */
3203 int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3204 if (ret != 0)
3205 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3206 return 0;
3209 /* Allocate and load the module: note that size of section 0 is always
3210 zero, and we rely on this for optional sections. */
3211 static int load_module(struct load_info *info, const char __user *uargs,
3212 int flags)
3214 struct module *mod;
3215 long err;
3216 char *after_dashes;
3218 err = module_sig_check(info);
3219 if (err)
3220 goto free_copy;
3222 err = elf_header_check(info);
3223 if (err)
3224 goto free_copy;
3226 /* Figure out module layout, and allocate all the memory. */
3227 mod = layout_and_allocate(info, flags);
3228 if (IS_ERR(mod)) {
3229 err = PTR_ERR(mod);
3230 goto free_copy;
3233 /* Reserve our place in the list. */
3234 err = add_unformed_module(mod);
3235 if (err)
3236 goto free_module;
3238 #ifdef CONFIG_MODULE_SIG
3239 mod->sig_ok = info->sig_ok;
3240 if (!mod->sig_ok) {
3241 pr_notice_once("%s: module verification failed: signature "
3242 "and/or required key missing - tainting "
3243 "kernel\n", mod->name);
3244 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
3246 #endif
3248 /* To avoid stressing percpu allocator, do this once we're unique. */
3249 err = percpu_modalloc(mod, info);
3250 if (err)
3251 goto unlink_mod;
3253 /* Now module is in final location, initialize linked lists, etc. */
3254 err = module_unload_init(mod);
3255 if (err)
3256 goto unlink_mod;
3258 /* Now we've got everything in the final locations, we can
3259 * find optional sections. */
3260 err = find_module_sections(mod, info);
3261 if (err)
3262 goto free_unload;
3264 err = check_module_license_and_versions(mod);
3265 if (err)
3266 goto free_unload;
3268 /* Set up MODINFO_ATTR fields */
3269 setup_modinfo(mod, info);
3271 /* Fix up syms, so that st_value is a pointer to location. */
3272 err = simplify_symbols(mod, info);
3273 if (err < 0)
3274 goto free_modinfo;
3276 err = apply_relocations(mod, info);
3277 if (err < 0)
3278 goto free_modinfo;
3280 err = post_relocation(mod, info);
3281 if (err < 0)
3282 goto free_modinfo;
3284 flush_module_icache(mod);
3286 /* Now copy in args */
3287 mod->args = strndup_user(uargs, ~0UL >> 1);
3288 if (IS_ERR(mod->args)) {
3289 err = PTR_ERR(mod->args);
3290 goto free_arch_cleanup;
3293 dynamic_debug_setup(info->debug, info->num_debug);
3295 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3296 ftrace_module_init(mod);
3298 /* Finally it's fully formed, ready to start executing. */
3299 err = complete_formation(mod, info);
3300 if (err)
3301 goto ddebug_cleanup;
3303 /* Module is ready to execute: parsing args may do that. */
3304 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3305 -32768, 32767, unknown_module_param_cb);
3306 if (IS_ERR(after_dashes)) {
3307 err = PTR_ERR(after_dashes);
3308 goto bug_cleanup;
3309 } else if (after_dashes) {
3310 pr_warn("%s: parameters '%s' after `--' ignored\n",
3311 mod->name, after_dashes);
3314 /* Link in to sysfs. */
3315 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3316 if (err < 0)
3317 goto bug_cleanup;
3319 /* Get rid of temporary copy. */
3320 free_copy(info);
3322 /* Done! */
3323 trace_module_load(mod);
3325 return do_init_module(mod);
3327 bug_cleanup:
3328 /* module_bug_cleanup needs module_mutex protection */
3329 mutex_lock(&module_mutex);
3330 module_bug_cleanup(mod);
3331 mutex_unlock(&module_mutex);
3333 blocking_notifier_call_chain(&module_notify_list,
3334 MODULE_STATE_GOING, mod);
3336 /* we can't deallocate the module until we clear memory protection */
3337 unset_module_init_ro_nx(mod);
3338 unset_module_core_ro_nx(mod);
3340 ddebug_cleanup:
3341 dynamic_debug_remove(info->debug);
3342 synchronize_sched();
3343 kfree(mod->args);
3344 free_arch_cleanup:
3345 module_arch_cleanup(mod);
3346 free_modinfo:
3347 free_modinfo(mod);
3348 free_unload:
3349 module_unload_free(mod);
3350 unlink_mod:
3351 mutex_lock(&module_mutex);
3352 /* Unlink carefully: kallsyms could be walking list. */
3353 list_del_rcu(&mod->list);
3354 wake_up_all(&module_wq);
3355 mutex_unlock(&module_mutex);
3356 free_module:
3357 module_deallocate(mod, info);
3358 free_copy:
3359 free_copy(info);
3360 return err;
3363 SYSCALL_DEFINE3(init_module, void __user *, umod,
3364 unsigned long, len, const char __user *, uargs)
3366 int err;
3367 struct load_info info = { };
3369 err = may_init_module();
3370 if (err)
3371 return err;
3373 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3374 umod, len, uargs);
3376 err = copy_module_from_user(umod, len, &info);
3377 if (err)
3378 return err;
3380 return load_module(&info, uargs, 0);
3383 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3385 int err;
3386 struct load_info info = { };
3388 err = may_init_module();
3389 if (err)
3390 return err;
3392 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3394 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3395 |MODULE_INIT_IGNORE_VERMAGIC))
3396 return -EINVAL;
3398 err = copy_module_from_fd(fd, &info);
3399 if (err)
3400 return err;
3402 return load_module(&info, uargs, flags);
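/*
 * Hypothetical userspace usage sketch (file name and parameter invented):
 *
 *	int fd = open("foo.ko", O_RDONLY);
 *	syscall(__NR_finit_module, fd, "debug=1", 0);
 *
 * which arrives here with flags == 0.
 */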
3405 static inline int within(unsigned long addr, void *start, unsigned long size)
3407 return ((void *)addr >= start && (void *)addr < start + size);
3410 #ifdef CONFIG_KALLSYMS
3412 * This ignores the intensely annoying "mapping symbols" found
3413 * in ARM ELF files: $a, $t and $d.
3415 static inline int is_arm_mapping_symbol(const char *str)
3417 if (str[0] == '.' && str[1] == 'L')
3418 return true;
3419 return str[0] == '$' && strchr("axtd", str[1])
3420 && (str[2] == '\0' || str[2] == '.');
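/*
 * Examples of names treated as mapping symbols (and therefore skipped
 * by callers): "$a", "$d", "$t.2", plus ".L"-prefixed local labels
 * such as ".LBB4_2".
 */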
3423 static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
3425 return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
3428 static const char *get_ksymbol(struct module *mod,
3429 unsigned long addr,
3430 unsigned long *size,
3431 unsigned long *offset)
3433 unsigned int i, best = 0;
3434 unsigned long nextval;
3435 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
3437 /* At worst, the next value is at the end of the module */
3438 if (within_module_init(addr, mod))
3439 nextval = (unsigned long)mod->module_init+mod->init_text_size;
3440 else
3441 nextval = (unsigned long)mod->module_core+mod->core_text_size;
3443 /* Scan for closest preceding symbol, and next symbol. (ELF
3444 starts real symbols at 1). */
3445 for (i = 1; i < kallsyms->num_symtab; i++) {
3446 if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
3447 continue;
3449 /* We ignore unnamed symbols: they're uninformative
3450 * and inserted at a whim. */
3451 if (*symname(kallsyms, i) == '\0'
3452 || is_arm_mapping_symbol(symname(kallsyms, i)))
3453 continue;
3455 if (kallsyms->symtab[i].st_value <= addr
3456 && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
3457 best = i;
3458 if (kallsyms->symtab[i].st_value > addr
3459 && kallsyms->symtab[i].st_value < nextval)
3460 nextval = kallsyms->symtab[i].st_value;
3463 if (!best)
3464 return NULL;
3466 if (size)
3467 *size = nextval - kallsyms->symtab[best].st_value;
3468 if (offset)
3469 *offset = addr - kallsyms->symtab[best].st_value;
3470 return symname(kallsyms, best);
3473 /* For kallsyms to ask for address resolution. NULL means not found. Be careful
3474 * not to lock, to avoid deadlock on oopses; simply disable preemption. */
3475 const char *module_address_lookup(unsigned long addr,
3476 unsigned long *size,
3477 unsigned long *offset,
3478 char **modname,
3479 char *namebuf)
3481 struct module *mod;
3482 const char *ret = NULL;
3484 preempt_disable();
3485 list_for_each_entry_rcu(mod, &modules, list) {
3486 if (mod->state == MODULE_STATE_UNFORMED)
3487 continue;
3488 if (within_module(addr, mod)) {
3489 if (modname)
3490 *modname = mod->name;
3491 ret = get_ksymbol(mod, addr, size, offset);
3492 break;
3495 /* Make a copy in here where it's safe */
3496 if (ret) {
3497 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3498 ret = namebuf;
3500 preempt_enable();
3501 return ret;
3504 int lookup_module_symbol_name(unsigned long addr, char *symname)
3506 struct module *mod;
3508 preempt_disable();
3509 list_for_each_entry_rcu(mod, &modules, list) {
3510 if (mod->state == MODULE_STATE_UNFORMED)
3511 continue;
3512 if (within_module(addr, mod)) {
3513 const char *sym;
3515 sym = get_ksymbol(mod, addr, NULL, NULL);
3516 if (!sym)
3517 goto out;
3518 strlcpy(symname, sym, KSYM_NAME_LEN);
3519 preempt_enable();
3520 return 0;
3523 out:
3524 preempt_enable();
3525 return -ERANGE;
3528 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3529 unsigned long *offset, char *modname, char *name)
3531 struct module *mod;
3533 preempt_disable();
3534 list_for_each_entry_rcu(mod, &modules, list) {
3535 if (mod->state == MODULE_STATE_UNFORMED)
3536 continue;
3537 if (within_module(addr, mod)) {
3538 const char *sym;
3540 sym = get_ksymbol(mod, addr, size, offset);
3541 if (!sym)
3542 goto out;
3543 if (modname)
3544 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3545 if (name)
3546 strlcpy(name, sym, KSYM_NAME_LEN);
3547 preempt_enable();
3548 return 0;
3551 out:
3552 preempt_enable();
3553 return -ERANGE;
3556 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3557 char *name, char *module_name, int *exported)
3559 struct module *mod;
3561 preempt_disable();
3562 list_for_each_entry_rcu(mod, &modules, list) {
3563 struct mod_kallsyms *kallsyms;
3565 if (mod->state == MODULE_STATE_UNFORMED)
3566 continue;
3567 kallsyms = rcu_dereference_sched(mod->kallsyms);
3568 if (symnum < kallsyms->num_symtab) {
3569 *value = kallsyms->symtab[symnum].st_value;
3570 *type = kallsyms->symtab[symnum].st_info;
3571 strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
3572 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3573 *exported = is_exported(name, *value, mod);
3574 preempt_enable();
3575 return 0;
3577 symnum -= kallsyms->num_symtab;
3579 preempt_enable();
3580 return -ERANGE;
3583 static unsigned long mod_find_symname(struct module *mod, const char *name)
3585 unsigned int i;
3586 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
3588 for (i = 0; i < kallsyms->num_symtab; i++)
3589 if (strcmp(name, symname(kallsyms, i)) == 0 &&
3590 kallsyms->symtab[i].st_info != 'U')
3591 return kallsyms->symtab[i].st_value;
3592 return 0;
3595 /* Look for this name: can be of form module:name. */
3596 unsigned long module_kallsyms_lookup_name(const char *name)
3598 struct module *mod;
3599 char *colon;
3600 unsigned long ret = 0;
3602 /* Don't lock: we're in enough trouble already. */
3603 preempt_disable();
3604 if ((colon = strchr(name, ':')) != NULL) {
3605 if ((mod = find_module_all(name, colon - name, false)) != NULL)
3606 ret = mod_find_symname(mod, colon+1);
3607 } else {
3608 list_for_each_entry_rcu(mod, &modules, list) {
3609 if (mod->state == MODULE_STATE_UNFORMED)
3610 continue;
3611 if ((ret = mod_find_symname(mod, name)) != 0)
3612 break;
3615 preempt_enable();
3616 return ret;
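/*
 * Usage sketch with made-up names: both
 * module_kallsyms_lookup_name("foo:foo_init_hw") and
 * module_kallsyms_lookup_name("foo_init_hw") resolve the symbol; the
 * former restricts the search to the module named "foo".
 */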
3619 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3620 struct module *, unsigned long),
3621 void *data)
3623 struct module *mod;
3624 unsigned int i;
3625 int ret;
3627 list_for_each_entry(mod, &modules, list) {
3628 /* We hold module_mutex: no need for rcu_dereference_sched */
3629 struct mod_kallsyms *kallsyms = mod->kallsyms;
3631 if (mod->state == MODULE_STATE_UNFORMED)
3632 continue;
3633 for (i = 0; i < kallsyms->num_symtab; i++) {
3634 ret = fn(data, symname(kallsyms, i),
3635 mod, kallsyms->symtab[i].st_value);
3636 if (ret != 0)
3637 return ret;
3640 return 0;
3642 #endif /* CONFIG_KALLSYMS */
3644 static char *module_flags(struct module *mod, char *buf)
3646 int bx = 0;
3648 BUG_ON(mod->state == MODULE_STATE_UNFORMED);
3649 if (mod->taints ||
3650 mod->state == MODULE_STATE_GOING ||
3651 mod->state == MODULE_STATE_COMING) {
3652 buf[bx++] = '(';
3653 bx += module_flags_taint(mod, buf + bx);
3654 /* Show a - for module-is-being-unloaded */
3655 if (mod->state == MODULE_STATE_GOING)
3656 buf[bx++] = '-';
3657 /* Show a + for module-is-being-loaded */
3658 if (mod->state == MODULE_STATE_COMING)
3659 buf[bx++] = '+';
3660 buf[bx++] = ')';
3662 buf[bx] = '\0';
3664 return buf;
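/*
 * Illustrative output: an out-of-tree proprietary module that is still
 * loading might show "(PO+)"; an untainted live module yields an empty
 * string.
 */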
3667 #ifdef CONFIG_PROC_FS
3668 /* Called by the /proc file system to return a list of modules. */
3669 static void *m_start(struct seq_file *m, loff_t *pos)
3671 mutex_lock(&module_mutex);
3672 return seq_list_start(&modules, *pos);
3675 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3677 return seq_list_next(p, &modules, pos);
3680 static void m_stop(struct seq_file *m, void *p)
3682 mutex_unlock(&module_mutex);
3685 static int m_show(struct seq_file *m, void *p)
3687 struct module *mod = list_entry(p, struct module, list);
3688 char buf[8];
3690 /* We always ignore unformed modules. */
3691 if (mod->state == MODULE_STATE_UNFORMED)
3692 return 0;
3694 seq_printf(m, "%s %u",
3695 mod->name, mod->init_size + mod->core_size);
3696 print_unload_info(m, mod);
3698 /* Informative for users. */
3699 seq_printf(m, " %s",
3700 mod->state == MODULE_STATE_GOING ? "Unloading":
3701 mod->state == MODULE_STATE_COMING ? "Loading":
3702 "Live");
3703 /* Used by oprofile and other similar tools. */
3704 seq_printf(m, " 0x%pK", mod->module_core);
3706 /* Taints info */
3707 if (mod->taints)
3708 seq_printf(m, " %s", module_flags(mod, buf));
3710 seq_printf(m, "\n");
3711 return 0;
3714 /* Format: modulename size refcount deps address
3716 Where refcount is a number or -, and deps is a comma-separated list
3717 of depends or -.
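   An illustrative line (the module's state is also printed, and the
   address uses %pK so it may show as zeroes depending on kptr_restrict):

	lockd 93977 2 nfsd,nfs, Live 0xffffffffa0340000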
3719 static const struct seq_operations modules_op = {
3720 .start = m_start,
3721 .next = m_next,
3722 .stop = m_stop,
3723 .show = m_show
3726 static int modules_open(struct inode *inode, struct file *file)
3728 return seq_open(file, &modules_op);
3731 static const struct file_operations proc_modules_operations = {
3732 .open = modules_open,
3733 .read = seq_read,
3734 .llseek = seq_lseek,
3735 .release = seq_release,
3738 static int __init proc_modules_init(void)
3740 proc_create("modules", 0, NULL, &proc_modules_operations);
3741 return 0;
3743 module_init(proc_modules_init);
3744 #endif
3746 /* Given an address, look for it in the module exception tables. */
3747 const struct exception_table_entry *search_module_extables(unsigned long addr)
3749 const struct exception_table_entry *e = NULL;
3750 struct module *mod;
3752 preempt_disable();
3753 list_for_each_entry_rcu(mod, &modules, list) {
3754 if (mod->state == MODULE_STATE_UNFORMED)
3755 continue;
3756 if (mod->num_exentries == 0)
3757 continue;
3759 e = search_extable(mod->extable,
3760 mod->extable + mod->num_exentries - 1,
3761 addr);
3762 if (e)
3763 break;
3765 preempt_enable();
3767 /* If we found one, we are currently running inside it, hence
3768 the module cannot be unloaded and no refcnt is needed. */
3769 return e;
3773 * is_module_address - is this address inside a module?
3774 * @addr: the address to check.
3776 * See is_module_text_address() if you simply want to see if the address
3777 * is code (not data).
3779 bool is_module_address(unsigned long addr)
3781 bool ret;
3783 preempt_disable();
3784 ret = __module_address(addr) != NULL;
3785 preempt_enable();
3787 return ret;
3791 * __module_address - get the module which contains an address.
3792 * @addr: the address.
3794 * Must be called with preempt disabled or module mutex held so that
3795 * module doesn't get freed during this.
3797 struct module *__module_address(unsigned long addr)
3799 struct module *mod;
3801 if (addr < module_addr_min || addr > module_addr_max)
3802 return NULL;
3804 list_for_each_entry_rcu(mod, &modules, list) {
3805 if (mod->state == MODULE_STATE_UNFORMED)
3806 continue;
3807 if (within_module(addr, mod))
3808 return mod;
3810 return NULL;
3812 EXPORT_SYMBOL_GPL(__module_address);
3815 * is_module_text_address - is this address inside module code?
3816 * @addr: the address to check.
3818 * See is_module_address() if you simply want to see if the address is
3819 * anywhere in a module. See kernel_text_address() for testing if an
3820 * address corresponds to kernel or module code.
3822 bool is_module_text_address(unsigned long addr)
3824 bool ret;
3826 preempt_disable();
3827 ret = __module_text_address(addr) != NULL;
3828 preempt_enable();
3830 return ret;
3834 * __module_text_address - get the module whose code contains an address.
3835 * @addr: the address.
3837 * Must be called with preempt disabled or module mutex held so that
3838 * module doesn't get freed during this.
3840 struct module *__module_text_address(unsigned long addr)
3842 struct module *mod = __module_address(addr);
3843 if (mod) {
3844 /* Make sure it's within the text section. */
3845 if (!within(addr, mod->module_init, mod->init_text_size)
3846 && !within(addr, mod->module_core, mod->core_text_size))
3847 mod = NULL;
3849 return mod;
3851 EXPORT_SYMBOL_GPL(__module_text_address);
3853 /* Don't grab lock, we're oopsing. */
3854 void print_modules(void)
3856 struct module *mod;
3857 char buf[8];
3859 printk(KERN_DEFAULT "Modules linked in:");
3860 /* Most callers should already have preempt disabled, but make sure */
3861 preempt_disable();
3862 list_for_each_entry_rcu(mod, &modules, list) {
3863 if (mod->state == MODULE_STATE_UNFORMED)
3864 continue;
3865 pr_cont(" %s%s", mod->name, module_flags(mod, buf));
3867 preempt_enable();
3868 if (last_unloaded_module[0])
3869 pr_cont(" [last unloaded: %s]", last_unloaded_module);
3870 pr_cont("\n");
3873 #ifdef CONFIG_MODVERSIONS
3874 /* Generate the signature for all relevant module structures here.
3875 * If these change, we don't want to try to parse the module. */
3876 void module_layout(struct module *mod,
3877 struct modversion_info *ver,
3878 struct kernel_param *kp,
3879 struct kernel_symbol *ks,
3880 struct tracepoint * const *tp)
3883 EXPORT_SYMBOL(module_layout);
3884 #endif