1 /*
2 Copyright (C) 2002 Richard Henderson
3 Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #include <linux/export.h>
20 #include <linux/moduleloader.h>
21 #include <linux/trace_events.h>
22 #include <linux/init.h>
23 #include <linux/kallsyms.h>
24 #include <linux/file.h>
25 #include <linux/fs.h>
26 #include <linux/sysfs.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/vmalloc.h>
30 #include <linux/elf.h>
31 #include <linux/proc_fs.h>
32 #include <linux/security.h>
33 #include <linux/seq_file.h>
34 #include <linux/syscalls.h>
35 #include <linux/fcntl.h>
36 #include <linux/rcupdate.h>
37 #include <linux/capability.h>
38 #include <linux/cpu.h>
39 #include <linux/moduleparam.h>
40 #include <linux/errno.h>
41 #include <linux/err.h>
42 #include <linux/vermagic.h>
43 #include <linux/notifier.h>
44 #include <linux/sched.h>
45 #include <linux/device.h>
46 #include <linux/string.h>
47 #include <linux/mutex.h>
48 #include <linux/rculist.h>
49 #include <asm/uaccess.h>
50 #include <asm/cacheflush.h>
51 #include <asm/mmu_context.h>
52 #include <linux/license.h>
53 #include <asm/sections.h>
54 #include <linux/tracepoint.h>
55 #include <linux/ftrace.h>
56 #include <linux/livepatch.h>
57 #include <linux/async.h>
58 #include <linux/percpu.h>
59 #include <linux/kmemleak.h>
60 #include <linux/jump_label.h>
61 #include <linux/pfn.h>
62 #include <linux/bsearch.h>
63 #include <linux/dynamic_debug.h>
64 #include <uapi/linux/module.h>
65 #include "module-internal.h"
67 #define CREATE_TRACE_POINTS
68 #include <trace/events/module.h>
70 #ifndef ARCH_SHF_SMALL
71 #define ARCH_SHF_SMALL 0
72 #endif
74 /*
75 * Modules' sections will be aligned on page boundaries
76 * to ensure complete separation of code and data, but
77 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
78 */
79 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
80 # define debug_align(X) ALIGN(X, PAGE_SIZE)
81 #else
82 # define debug_align(X) (X)
83 #endif
85 /* If this is set, the section belongs in the init part of the module */
86 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
89 * Mutex protects:
90 * 1) List of modules (also safely readable with preempt_disable),
91 * 2) module_use links,
92 * 3) module_addr_min/module_addr_max.
93 * (delete and add uses RCU list operations). */
94 DEFINE_MUTEX(module_mutex);
95 EXPORT_SYMBOL_GPL(module_mutex);
96 static LIST_HEAD(modules);
98 #ifdef CONFIG_MODULES_TREE_LOOKUP
101 * Use a latched RB-tree for __module_address(); this allows us to use
102 * RCU-sched lookups of the address from any context.
104 * This is conditional on PERF_EVENTS || TRACING because those can really hit
105 * __module_address() hard by doing a lot of stack unwinding; potentially from
106 * NMI context.
109 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
111 struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
113 return (unsigned long)layout->base;
116 static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
118 struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
120 return (unsigned long)layout->size;
123 static __always_inline bool
124 mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
126 return __mod_tree_val(a) < __mod_tree_val(b);
129 static __always_inline int
130 mod_tree_comp(void *key, struct latch_tree_node *n)
132 unsigned long val = (unsigned long)key;
133 unsigned long start, end;
135 start = __mod_tree_val(n);
136 if (val < start)
137 return -1;
139 end = start + __mod_tree_size(n);
140 if (val >= end)
141 return 1;
143 return 0;
146 static const struct latch_tree_ops mod_tree_ops = {
147 .less = mod_tree_less,
148 .comp = mod_tree_comp,
151 static struct mod_tree_root {
152 struct latch_tree_root root;
153 unsigned long addr_min;
154 unsigned long addr_max;
155 } mod_tree __cacheline_aligned = {
156 .addr_min = -1UL,
159 #define module_addr_min mod_tree.addr_min
160 #define module_addr_max mod_tree.addr_max
162 static noinline void __mod_tree_insert(struct mod_tree_node *node)
164 latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
167 static void __mod_tree_remove(struct mod_tree_node *node)
169 latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
173 * These modifications: insert, remove_init and remove; are serialized by the
174 * module_mutex.
176 static void mod_tree_insert(struct module *mod)
178 mod->core_layout.mtn.mod = mod;
179 mod->init_layout.mtn.mod = mod;
181 __mod_tree_insert(&mod->core_layout.mtn);
182 if (mod->init_layout.size)
183 __mod_tree_insert(&mod->init_layout.mtn);
186 static void mod_tree_remove_init(struct module *mod)
188 if (mod->init_layout.size)
189 __mod_tree_remove(&mod->init_layout.mtn);
192 static void mod_tree_remove(struct module *mod)
194 __mod_tree_remove(&mod->core_layout.mtn);
195 mod_tree_remove_init(mod);
198 static struct module *mod_find(unsigned long addr)
200 struct latch_tree_node *ltn;
202 ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
203 if (!ltn)
204 return NULL;
206 return container_of(ltn, struct mod_tree_node, node)->mod;
209 #else /* MODULES_TREE_LOOKUP */
211 static unsigned long module_addr_min = -1UL, module_addr_max = 0;
213 static void mod_tree_insert(struct module *mod) { }
214 static void mod_tree_remove_init(struct module *mod) { }
215 static void mod_tree_remove(struct module *mod) { }
217 static struct module *mod_find(unsigned long addr)
219 struct module *mod;
221 list_for_each_entry_rcu(mod, &modules, list) {
222 if (within_module(addr, mod))
223 return mod;
226 return NULL;
229 #endif /* MODULES_TREE_LOOKUP */
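/*
 * Illustrative sketch: either variant of mod_find() above is meant to be
 * called with preemption disabled (the list variant walks the RCU list,
 * the tree variant does an RCU-sched latch lookup), e.g.
 *
 *	preempt_disable();
 *	mod = mod_find(addr);
 *	if (mod && mod->state != MODULE_STATE_UNFORMED)
 *		... use mod ...
 *	preempt_enable();
 *
 * which mirrors how callers such as __module_address() use it.
 */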
232 * Bounds of module text, for speeding up __module_address.
233 * Protected by module_mutex.
235 static void __mod_update_bounds(void *base, unsigned int size)
237 unsigned long min = (unsigned long)base;
238 unsigned long max = min + size;
240 if (min < module_addr_min)
241 module_addr_min = min;
242 if (max > module_addr_max)
243 module_addr_max = max;
246 static void mod_update_bounds(struct module *mod)
248 __mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
249 if (mod->init_layout.size)
250 __mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
253 #ifdef CONFIG_KGDB_KDB
254 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
255 #endif /* CONFIG_KGDB_KDB */
257 static void module_assert_mutex(void)
259 lockdep_assert_held(&module_mutex);
262 static void module_assert_mutex_or_preempt(void)
264 #ifdef CONFIG_LOCKDEP
265 if (unlikely(!debug_locks))
266 return;
268 WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
269 !lockdep_is_held(&module_mutex));
270 #endif
273 static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
274 #ifndef CONFIG_MODULE_SIG_FORCE
275 module_param(sig_enforce, bool_enable_only, 0644);
276 #endif /* !CONFIG_MODULE_SIG_FORCE */
278 /* Block module loading/unloading? */
279 int modules_disabled = 0;
280 core_param(nomodule, modules_disabled, bint, 0);
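/*
 * Usage note: "nomodule" on the kernel command line sets modules_disabled
 * and blocks both loading and unloading, while sig_enforce above is
 * exposed as module.sig_enforce and, being bool_enable_only, can be
 * switched on at runtime but never back off.
 */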
282 /* Waiting for a module to finish initializing? */
283 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
285 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
287 int register_module_notifier(struct notifier_block *nb)
289 return blocking_notifier_chain_register(&module_notify_list, nb);
291 EXPORT_SYMBOL(register_module_notifier);
293 int unregister_module_notifier(struct notifier_block *nb)
295 return blocking_notifier_chain_unregister(&module_notify_list, nb);
297 EXPORT_SYMBOL(unregister_module_notifier);
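/*
 * A minimal sketch of how a client uses the notifier chain above; the
 * callback arguments follow from the blocking_notifier_call_chain()
 * calls later in this file ('action' is a MODULE_STATE_* value, 'data'
 * is the struct module). my_module_event/my_module_nb are hypothetical
 * names used only for illustration:
 *
 *	static int my_module_event(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct module *mod = data;
 *
 *		if (action == MODULE_STATE_COMING)
 *			pr_info("%s is coming\n", mod->name);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_module_nb = {
 *		.notifier_call = my_module_event,
 *	};
 *
 *	register_module_notifier(&my_module_nb);
 */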
299 struct load_info {
300 Elf_Ehdr *hdr;
301 unsigned long len;
302 Elf_Shdr *sechdrs;
303 char *secstrings, *strtab;
304 unsigned long symoffs, stroffs;
305 struct _ddebug *debug;
306 unsigned int num_debug;
307 bool sig_ok;
308 #ifdef CONFIG_KALLSYMS
309 unsigned long mod_kallsyms_init_off;
310 #endif
311 struct {
312 unsigned int sym, str, mod, vers, info, pcpu;
313 } index;
316 /* We require a truly strong try_module_get(): 0 means failure due to
317 ongoing or failed initialization etc. */
318 static inline int strong_try_module_get(struct module *mod)
320 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
321 if (mod && mod->state == MODULE_STATE_COMING)
322 return -EBUSY;
323 if (try_module_get(mod))
324 return 0;
325 else
326 return -ENOENT;
329 static inline void add_taint_module(struct module *mod, unsigned flag,
330 enum lockdep_ok lockdep_ok)
332 add_taint(flag, lockdep_ok);
333 mod->taints |= (1U << flag);
337 * A thread that wants to hold a reference to a module only while it
338 * is running can call this to safely exit. nfsd and lockd use this.
340 void __noreturn __module_put_and_exit(struct module *mod, long code)
342 module_put(mod);
343 do_exit(code);
345 EXPORT_SYMBOL(__module_put_and_exit);
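/*
 * Sketch of the typical call site: a module kthread that pinned its own
 * module finishes with something like
 *
 *	__module_put_and_exit(THIS_MODULE, 0);
 *
 * (usually via the module_put_and_exit() convenience wrapper declared in
 * <linux/module.h>).
 */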
347 /* Find a module section: 0 means not found. */
348 static unsigned int find_sec(const struct load_info *info, const char *name)
350 unsigned int i;
352 for (i = 1; i < info->hdr->e_shnum; i++) {
353 Elf_Shdr *shdr = &info->sechdrs[i];
354 /* Alloc bit cleared means "ignore it." */
355 if ((shdr->sh_flags & SHF_ALLOC)
356 && strcmp(info->secstrings + shdr->sh_name, name) == 0)
357 return i;
359 return 0;
362 /* Find a module section, or NULL. */
363 static void *section_addr(const struct load_info *info, const char *name)
365 /* Section 0 has sh_addr 0. */
366 return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
369 /* Find a module section, or NULL. Fill in number of "objects" in section. */
370 static void *section_objs(const struct load_info *info,
371 const char *name,
372 size_t object_size,
373 unsigned int *num)
375 unsigned int sec = find_sec(info, name);
377 /* Section 0 has sh_addr 0 and sh_size 0. */
378 *num = info->sechdrs[sec].sh_size / object_size;
379 return (void *)info->sechdrs[sec].sh_addr;
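/*
 * Typical use of the helpers above, sketched with a hypothetical caller:
 * locate an array of fixed-size records in a named section and learn how
 * many of them it holds.
 *
 *	unsigned int num;
 *	struct kernel_param *kp;
 *
 *	kp = section_objs(info, "__param", sizeof(*kp), &num);
 *
 * When the section is absent, find_sec() returns 0, so the result is
 * section 0's sh_addr with num == 0, which callers treat as "none".
 */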
382 /* Provided by the linker */
383 extern const struct kernel_symbol __start___ksymtab[];
384 extern const struct kernel_symbol __stop___ksymtab[];
385 extern const struct kernel_symbol __start___ksymtab_gpl[];
386 extern const struct kernel_symbol __stop___ksymtab_gpl[];
387 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
388 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
389 extern const unsigned long __start___kcrctab[];
390 extern const unsigned long __start___kcrctab_gpl[];
391 extern const unsigned long __start___kcrctab_gpl_future[];
392 #ifdef CONFIG_UNUSED_SYMBOLS
393 extern const struct kernel_symbol __start___ksymtab_unused[];
394 extern const struct kernel_symbol __stop___ksymtab_unused[];
395 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
396 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
397 extern const unsigned long __start___kcrctab_unused[];
398 extern const unsigned long __start___kcrctab_unused_gpl[];
399 #endif
401 #ifndef CONFIG_MODVERSIONS
402 #define symversion(base, idx) NULL
403 #else
404 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
405 #endif
407 static bool each_symbol_in_section(const struct symsearch *arr,
408 unsigned int arrsize,
409 struct module *owner,
410 bool (*fn)(const struct symsearch *syms,
411 struct module *owner,
412 void *data),
413 void *data)
415 unsigned int j;
417 for (j = 0; j < arrsize; j++) {
418 if (fn(&arr[j], owner, data))
419 return true;
422 return false;
425 /* Returns true as soon as fn returns true, otherwise false. */
426 bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
427 struct module *owner,
428 void *data),
429 void *data)
431 struct module *mod;
432 static const struct symsearch arr[] = {
433 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
434 NOT_GPL_ONLY, false },
435 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
436 __start___kcrctab_gpl,
437 GPL_ONLY, false },
438 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
439 __start___kcrctab_gpl_future,
440 WILL_BE_GPL_ONLY, false },
441 #ifdef CONFIG_UNUSED_SYMBOLS
442 { __start___ksymtab_unused, __stop___ksymtab_unused,
443 __start___kcrctab_unused,
444 NOT_GPL_ONLY, true },
445 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
446 __start___kcrctab_unused_gpl,
447 GPL_ONLY, true },
448 #endif
451 module_assert_mutex_or_preempt();
453 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
454 return true;
456 list_for_each_entry_rcu(mod, &modules, list) {
457 struct symsearch arr[] = {
458 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
459 NOT_GPL_ONLY, false },
460 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
461 mod->gpl_crcs,
462 GPL_ONLY, false },
463 { mod->gpl_future_syms,
464 mod->gpl_future_syms + mod->num_gpl_future_syms,
465 mod->gpl_future_crcs,
466 WILL_BE_GPL_ONLY, false },
467 #ifdef CONFIG_UNUSED_SYMBOLS
468 { mod->unused_syms,
469 mod->unused_syms + mod->num_unused_syms,
470 mod->unused_crcs,
471 NOT_GPL_ONLY, true },
472 { mod->unused_gpl_syms,
473 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
474 mod->unused_gpl_crcs,
475 GPL_ONLY, true },
476 #endif
479 if (mod->state == MODULE_STATE_UNFORMED)
480 continue;
482 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
483 return true;
485 return false;
487 EXPORT_SYMBOL_GPL(each_symbol_section);
489 struct find_symbol_arg {
490 /* Input */
491 const char *name;
492 bool gplok;
493 bool warn;
495 /* Output */
496 struct module *owner;
497 const unsigned long *crc;
498 const struct kernel_symbol *sym;
501 static bool check_symbol(const struct symsearch *syms,
502 struct module *owner,
503 unsigned int symnum, void *data)
505 struct find_symbol_arg *fsa = data;
507 if (!fsa->gplok) {
508 if (syms->licence == GPL_ONLY)
509 return false;
510 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
511 pr_warn("Symbol %s is being used by a non-GPL module, "
512 "which will not be allowed in the future\n",
513 fsa->name);
517 #ifdef CONFIG_UNUSED_SYMBOLS
518 if (syms->unused && fsa->warn) {
519 pr_warn("Symbol %s is marked as UNUSED, however this module is "
520 "using it.\n", fsa->name);
521 pr_warn("This symbol will go away in the future.\n");
522 pr_warn("Please evaluate if this is the right api to use and "
523 "if it really is, submit a report to the linux kernel "
524 "mailing list together with submitting your code for "
525 "inclusion.\n");
527 #endif
529 fsa->owner = owner;
530 fsa->crc = symversion(syms->crcs, symnum);
531 fsa->sym = &syms->start[symnum];
532 return true;
535 static int cmp_name(const void *va, const void *vb)
537 const char *a;
538 const struct kernel_symbol *b;
539 a = va; b = vb;
540 return strcmp(a, b->name);
543 static bool find_symbol_in_section(const struct symsearch *syms,
544 struct module *owner,
545 void *data)
547 struct find_symbol_arg *fsa = data;
548 struct kernel_symbol *sym;
550 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
551 sizeof(struct kernel_symbol), cmp_name);
553 if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
554 return true;
556 return false;
559 /* Find a symbol and return it, along with, (optional) crc and
560 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
561 const struct kernel_symbol *find_symbol(const char *name,
562 struct module **owner,
563 const unsigned long **crc,
564 bool gplok,
565 bool warn)
567 struct find_symbol_arg fsa;
569 fsa.name = name;
570 fsa.gplok = gplok;
571 fsa.warn = warn;
573 if (each_symbol_section(find_symbol_in_section, &fsa)) {
574 if (owner)
575 *owner = fsa.owner;
576 if (crc)
577 *crc = fsa.crc;
578 return fsa.sym;
581 pr_debug("Failed to find symbol %s\n", name);
582 return NULL;
584 EXPORT_SYMBOL_GPL(find_symbol);
587 * Search for module by name: must hold module_mutex (or preempt disabled
588 * for read-only access).
590 static struct module *find_module_all(const char *name, size_t len,
591 bool even_unformed)
593 struct module *mod;
595 module_assert_mutex_or_preempt();
597 list_for_each_entry(mod, &modules, list) {
598 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
599 continue;
600 if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
601 return mod;
603 return NULL;
606 struct module *find_module(const char *name)
608 module_assert_mutex();
609 return find_module_all(name, strlen(name), false);
611 EXPORT_SYMBOL_GPL(find_module);
613 #ifdef CONFIG_SMP
615 static inline void __percpu *mod_percpu(struct module *mod)
617 return mod->percpu;
620 static int percpu_modalloc(struct module *mod, struct load_info *info)
622 Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
623 unsigned long align = pcpusec->sh_addralign;
625 if (!pcpusec->sh_size)
626 return 0;
628 if (align > PAGE_SIZE) {
629 pr_warn("%s: per-cpu alignment %li > %li\n",
630 mod->name, align, PAGE_SIZE);
631 align = PAGE_SIZE;
634 mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
635 if (!mod->percpu) {
636 pr_warn("%s: Could not allocate %lu bytes percpu data\n",
637 mod->name, (unsigned long)pcpusec->sh_size);
638 return -ENOMEM;
640 mod->percpu_size = pcpusec->sh_size;
641 return 0;
644 static void percpu_modfree(struct module *mod)
646 free_percpu(mod->percpu);
649 static unsigned int find_pcpusec(struct load_info *info)
651 return find_sec(info, ".data..percpu");
654 static void percpu_modcopy(struct module *mod,
655 const void *from, unsigned long size)
657 int cpu;
659 for_each_possible_cpu(cpu)
660 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
664 * is_module_percpu_address - test whether address is from module static percpu
665 * @addr: address to test
667 * Test whether @addr belongs to module static percpu area.
669 * RETURNS:
670 * %true if @addr is from module static percpu area
672 bool is_module_percpu_address(unsigned long addr)
674 struct module *mod;
675 unsigned int cpu;
677 preempt_disable();
679 list_for_each_entry_rcu(mod, &modules, list) {
680 if (mod->state == MODULE_STATE_UNFORMED)
681 continue;
682 if (!mod->percpu_size)
683 continue;
684 for_each_possible_cpu(cpu) {
685 void *start = per_cpu_ptr(mod->percpu, cpu);
687 if ((void *)addr >= start &&
688 (void *)addr < start + mod->percpu_size) {
689 preempt_enable();
690 return true;
695 preempt_enable();
696 return false;
699 #else /* ... !CONFIG_SMP */
701 static inline void __percpu *mod_percpu(struct module *mod)
703 return NULL;
705 static int percpu_modalloc(struct module *mod, struct load_info *info)
707 /* UP modules shouldn't have this section: ENOMEM isn't quite right */
708 if (info->sechdrs[info->index.pcpu].sh_size != 0)
709 return -ENOMEM;
710 return 0;
712 static inline void percpu_modfree(struct module *mod)
715 static unsigned int find_pcpusec(struct load_info *info)
717 return 0;
719 static inline void percpu_modcopy(struct module *mod,
720 const void *from, unsigned long size)
722 /* pcpusec should be 0, and size of that section should be 0. */
723 BUG_ON(size != 0);
725 bool is_module_percpu_address(unsigned long addr)
727 return false;
730 #endif /* CONFIG_SMP */
732 #define MODINFO_ATTR(field) \
733 static void setup_modinfo_##field(struct module *mod, const char *s) \
735 mod->field = kstrdup(s, GFP_KERNEL); \
737 static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
738 struct module_kobject *mk, char *buffer) \
740 return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
742 static int modinfo_##field##_exists(struct module *mod) \
744 return mod->field != NULL; \
746 static void free_modinfo_##field(struct module *mod) \
748 kfree(mod->field); \
749 mod->field = NULL; \
751 static struct module_attribute modinfo_##field = { \
752 .attr = { .name = __stringify(field), .mode = 0444 }, \
753 .show = show_modinfo_##field, \
754 .setup = setup_modinfo_##field, \
755 .test = modinfo_##field##_exists, \
756 .free = free_modinfo_##field, \
759 MODINFO_ATTR(version);
760 MODINFO_ATTR(srcversion);
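/*
 * Each MODINFO_ATTR(field) above expands to setup_modinfo_<field>(),
 * show_modinfo_<field>(), modinfo_<field>_exists() and
 * free_modinfo_<field>() plus a struct module_attribute with mode 0444,
 * so "version" and "srcversion" end up as read-only files under
 * /sys/module/<name>/.
 */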
762 static char last_unloaded_module[MODULE_NAME_LEN+1];
764 #ifdef CONFIG_MODULE_UNLOAD
766 EXPORT_TRACEPOINT_SYMBOL(module_get);
768 /* MODULE_REF_BASE is the base reference count by kmodule loader. */
769 #define MODULE_REF_BASE 1
771 /* Init the unload section of the module. */
772 static int module_unload_init(struct module *mod)
775 * Initialize reference counter to MODULE_REF_BASE.
776 * refcnt == 0 means module is going.
778 atomic_set(&mod->refcnt, MODULE_REF_BASE);
780 INIT_LIST_HEAD(&mod->source_list);
781 INIT_LIST_HEAD(&mod->target_list);
783 /* Hold reference count during initialization. */
784 atomic_inc(&mod->refcnt);
786 return 0;
789 /* Does a already use b? */
790 static int already_uses(struct module *a, struct module *b)
792 struct module_use *use;
794 list_for_each_entry(use, &b->source_list, source_list) {
795 if (use->source == a) {
796 pr_debug("%s uses %s!\n", a->name, b->name);
797 return 1;
800 pr_debug("%s does not use %s!\n", a->name, b->name);
801 return 0;
805 * Module a uses b
806 * - we add 'a' as a "source", 'b' as a "target" of module use
807 * - the module_use is added to the list of 'b' sources (so
808 * 'b' can walk the list to see who sourced them), and of 'a'
809 * targets (so 'a' can see what modules it targets).
811 static int add_module_usage(struct module *a, struct module *b)
813 struct module_use *use;
815 pr_debug("Allocating new usage for %s.\n", a->name);
816 use = kmalloc(sizeof(*use), GFP_ATOMIC);
817 if (!use) {
818 pr_warn("%s: out of memory loading\n", a->name);
819 return -ENOMEM;
822 use->source = a;
823 use->target = b;
824 list_add(&use->source_list, &b->source_list);
825 list_add(&use->target_list, &a->target_list);
826 return 0;
829 /* Module a uses b: caller needs module_mutex() */
830 int ref_module(struct module *a, struct module *b)
832 int err;
834 if (b == NULL || already_uses(a, b))
835 return 0;
837 /* If module isn't available, we fail. */
838 err = strong_try_module_get(b);
839 if (err)
840 return err;
842 err = add_module_usage(a, b);
843 if (err) {
844 module_put(b);
845 return err;
847 return 0;
849 EXPORT_SYMBOL_GPL(ref_module);
851 /* Clear the unload stuff of the module. */
852 static void module_unload_free(struct module *mod)
854 struct module_use *use, *tmp;
856 mutex_lock(&module_mutex);
857 list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
858 struct module *i = use->target;
859 pr_debug("%s unusing %s\n", mod->name, i->name);
860 module_put(i);
861 list_del(&use->source_list);
862 list_del(&use->target_list);
863 kfree(use);
865 mutex_unlock(&module_mutex);
868 #ifdef CONFIG_MODULE_FORCE_UNLOAD
869 static inline int try_force_unload(unsigned int flags)
871 int ret = (flags & O_TRUNC);
872 if (ret)
873 add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
874 return ret;
876 #else
877 static inline int try_force_unload(unsigned int flags)
879 return 0;
881 #endif /* CONFIG_MODULE_FORCE_UNLOAD */
883 /* Try to release refcount of module, 0 means success. */
884 static int try_release_module_ref(struct module *mod)
886 int ret;
888 /* Try to decrement refcnt which we set at loading */
889 ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
890 BUG_ON(ret < 0);
891 if (ret)
892 /* Someone can put this right now, recover with checking */
893 ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
895 return ret;
898 static int try_stop_module(struct module *mod, int flags, int *forced)
900 /* If it's not unused, quit unless we're forcing. */
901 if (try_release_module_ref(mod) != 0) {
902 *forced = try_force_unload(flags);
903 if (!(*forced))
904 return -EWOULDBLOCK;
907 /* Mark it as dying. */
908 mod->state = MODULE_STATE_GOING;
910 return 0;
914 * module_refcount - return the refcount or -1 if unloading
916 * @mod: the module we're checking
918 * Returns:
919 * -1 if the module is in the process of unloading
920 * otherwise the number of references in the kernel to the module
922 int module_refcount(struct module *mod)
924 return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
926 EXPORT_SYMBOL(module_refcount);
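/*
 * Worked example of the counting scheme: a live module starts at
 * refcnt == MODULE_REF_BASE (1); two outstanding try_module_get()
 * holders make refcnt == 3 and module_refcount() == 2, and
 * try_release_module_ref() below can only strip the base reference once
 * those holders have called module_put().
 */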
928 /* This exists whether we can unload or not */
929 static void free_module(struct module *mod);
931 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
932 unsigned int, flags)
934 struct module *mod;
935 char name[MODULE_NAME_LEN];
936 int ret, forced = 0;
938 if (!capable(CAP_SYS_MODULE) || modules_disabled)
939 return -EPERM;
941 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
942 return -EFAULT;
943 name[MODULE_NAME_LEN-1] = '\0';
945 if (mutex_lock_interruptible(&module_mutex) != 0)
946 return -EINTR;
948 mod = find_module(name);
949 if (!mod) {
950 ret = -ENOENT;
951 goto out;
954 if (!list_empty(&mod->source_list)) {
955 /* Other modules depend on us: get rid of them first. */
956 ret = -EWOULDBLOCK;
957 goto out;
960 /* Doing init or already dying? */
961 if (mod->state != MODULE_STATE_LIVE) {
962 /* FIXME: if (force), slam module count damn the torpedoes */
963 pr_debug("%s already dying\n", mod->name);
964 ret = -EBUSY;
965 goto out;
968 /* If it has an init func, it must have an exit func to unload */
969 if (mod->init && !mod->exit) {
970 forced = try_force_unload(flags);
971 if (!forced) {
972 /* This module can't be removed */
973 ret = -EBUSY;
974 goto out;
978 /* Stop the machine so refcounts can't move and disable module. */
979 ret = try_stop_module(mod, flags, &forced);
980 if (ret != 0)
981 goto out;
983 mutex_unlock(&module_mutex);
984 /* Final destruction now no one is using it. */
985 if (mod->exit != NULL)
986 mod->exit();
987 blocking_notifier_call_chain(&module_notify_list,
988 MODULE_STATE_GOING, mod);
989 klp_module_going(mod);
990 ftrace_release_mod(mod);
992 async_synchronize_full();
994 /* Store the name of the last unloaded module for diagnostic purposes */
995 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
997 free_module(mod);
998 /* someone could wait for the module in add_unformed_module() */
999 wake_up_all(&module_wq);
1000 return 0;
1001 out:
1002 mutex_unlock(&module_mutex);
1003 return ret;
1006 static inline void print_unload_info(struct seq_file *m, struct module *mod)
1008 struct module_use *use;
1009 int printed_something = 0;
1011 seq_printf(m, " %i ", module_refcount(mod));
1014 * Always include a trailing , so userspace can differentiate
1015 * between this and the old multi-field proc format.
1017 list_for_each_entry(use, &mod->source_list, source_list) {
1018 printed_something = 1;
1019 seq_printf(m, "%s,", use->source->name);
1022 if (mod->init != NULL && mod->exit == NULL) {
1023 printed_something = 1;
1024 seq_puts(m, "[permanent],");
1027 if (!printed_something)
1028 seq_puts(m, "-");
1031 void __symbol_put(const char *symbol)
1033 struct module *owner;
1035 preempt_disable();
1036 if (!find_symbol(symbol, &owner, NULL, true, false))
1037 BUG();
1038 module_put(owner);
1039 preempt_enable();
1041 EXPORT_SYMBOL(__symbol_put);
1043 /* Note this assumes addr is a function, which it currently always is. */
1044 void symbol_put_addr(void *addr)
1046 struct module *modaddr;
1047 unsigned long a = (unsigned long)dereference_function_descriptor(addr);
1049 if (core_kernel_text(a))
1050 return;
1053 * Even though we hold a reference on the module; we still need to
1054 * disable preemption in order to safely traverse the data structure.
1056 preempt_disable();
1057 modaddr = __module_text_address(a);
1058 BUG_ON(!modaddr);
1059 module_put(modaddr);
1060 preempt_enable();
1062 EXPORT_SYMBOL_GPL(symbol_put_addr);
1064 static ssize_t show_refcnt(struct module_attribute *mattr,
1065 struct module_kobject *mk, char *buffer)
1067 return sprintf(buffer, "%i\n", module_refcount(mk->mod));
1070 static struct module_attribute modinfo_refcnt =
1071 __ATTR(refcnt, 0444, show_refcnt, NULL);
1073 void __module_get(struct module *module)
1075 if (module) {
1076 preempt_disable();
1077 atomic_inc(&module->refcnt);
1078 trace_module_get(module, _RET_IP_);
1079 preempt_enable();
1082 EXPORT_SYMBOL(__module_get);
1084 bool try_module_get(struct module *module)
1086 bool ret = true;
1088 if (module) {
1089 preempt_disable();
1090 /* Note: here, we can fail to get a reference */
1091 if (likely(module_is_live(module) &&
1092 atomic_inc_not_zero(&module->refcnt) != 0))
1093 trace_module_get(module, _RET_IP_);
1094 else
1095 ret = false;
1097 preempt_enable();
1099 return ret;
1101 EXPORT_SYMBOL(try_module_get);
1103 void module_put(struct module *module)
1105 int ret;
1107 if (module) {
1108 preempt_disable();
1109 ret = atomic_dec_if_positive(&module->refcnt);
1110 WARN_ON(ret < 0); /* Failed to put refcount */
1111 trace_module_put(module, _RET_IP_);
1112 preempt_enable();
1115 EXPORT_SYMBOL(module_put);
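/*
 * The usual caller-side pattern (sketch): pin the owning module for as
 * long as its code or data is in use, then drop the reference.
 *
 *	if (try_module_get(owner)) {
 *		... safe to call into 'owner' here ...
 *		module_put(owner);
 *	}
 */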
1117 #else /* !CONFIG_MODULE_UNLOAD */
1118 static inline void print_unload_info(struct seq_file *m, struct module *mod)
1120 /* We don't know the usage count, or what modules are using. */
1121 seq_puts(m, " - -");
1124 static inline void module_unload_free(struct module *mod)
1128 int ref_module(struct module *a, struct module *b)
1130 return strong_try_module_get(b);
1132 EXPORT_SYMBOL_GPL(ref_module);
1134 static inline int module_unload_init(struct module *mod)
1136 return 0;
1138 #endif /* CONFIG_MODULE_UNLOAD */
1140 static size_t module_flags_taint(struct module *mod, char *buf)
1142 size_t l = 0;
1144 if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
1145 buf[l++] = 'P';
1146 if (mod->taints & (1 << TAINT_OOT_MODULE))
1147 buf[l++] = 'O';
1148 if (mod->taints & (1 << TAINT_FORCED_MODULE))
1149 buf[l++] = 'F';
1150 if (mod->taints & (1 << TAINT_CRAP))
1151 buf[l++] = 'C';
1152 if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
1153 buf[l++] = 'E';
1154 if (mod->taints & (1 << TAINT_LIVEPATCH))
1155 buf[l++] = 'K';
1157 * TAINT_FORCED_RMMOD: could be added.
1158 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
1159 * apply to modules.
1161 return l;
1164 static ssize_t show_initstate(struct module_attribute *mattr,
1165 struct module_kobject *mk, char *buffer)
1167 const char *state = "unknown";
1169 switch (mk->mod->state) {
1170 case MODULE_STATE_LIVE:
1171 state = "live";
1172 break;
1173 case MODULE_STATE_COMING:
1174 state = "coming";
1175 break;
1176 case MODULE_STATE_GOING:
1177 state = "going";
1178 break;
1179 default:
1180 BUG();
1182 return sprintf(buffer, "%s\n", state);
1185 static struct module_attribute modinfo_initstate =
1186 __ATTR(initstate, 0444, show_initstate, NULL);
1188 static ssize_t store_uevent(struct module_attribute *mattr,
1189 struct module_kobject *mk,
1190 const char *buffer, size_t count)
1192 enum kobject_action action;
1194 if (kobject_action_type(buffer, count, &action) == 0)
1195 kobject_uevent(&mk->kobj, action);
1196 return count;
1199 struct module_attribute module_uevent =
1200 __ATTR(uevent, 0200, NULL, store_uevent);
1202 static ssize_t show_coresize(struct module_attribute *mattr,
1203 struct module_kobject *mk, char *buffer)
1205 return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
1208 static struct module_attribute modinfo_coresize =
1209 __ATTR(coresize, 0444, show_coresize, NULL);
1211 static ssize_t show_initsize(struct module_attribute *mattr,
1212 struct module_kobject *mk, char *buffer)
1214 return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
1217 static struct module_attribute modinfo_initsize =
1218 __ATTR(initsize, 0444, show_initsize, NULL);
1220 static ssize_t show_taint(struct module_attribute *mattr,
1221 struct module_kobject *mk, char *buffer)
1223 size_t l;
1225 l = module_flags_taint(mk->mod, buffer);
1226 buffer[l++] = '\n';
1227 return l;
1230 static struct module_attribute modinfo_taint =
1231 __ATTR(taint, 0444, show_taint, NULL);
1233 static struct module_attribute *modinfo_attrs[] = {
1234 &module_uevent,
1235 &modinfo_version,
1236 &modinfo_srcversion,
1237 &modinfo_initstate,
1238 &modinfo_coresize,
1239 &modinfo_initsize,
1240 &modinfo_taint,
1241 #ifdef CONFIG_MODULE_UNLOAD
1242 &modinfo_refcnt,
1243 #endif
1244 NULL,
1247 static const char vermagic[] = VERMAGIC_STRING;
1249 static int try_to_force_load(struct module *mod, const char *reason)
1251 #ifdef CONFIG_MODULE_FORCE_LOAD
1252 if (!test_taint(TAINT_FORCED_MODULE))
1253 pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
1254 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
1255 return 0;
1256 #else
1257 return -ENOEXEC;
1258 #endif
1261 #ifdef CONFIG_MODVERSIONS
1262 /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
1263 static unsigned long maybe_relocated(unsigned long crc,
1264 const struct module *crc_owner)
1266 #ifdef ARCH_RELOCATES_KCRCTAB
1267 if (crc_owner == NULL)
1268 return crc - (unsigned long)reloc_start;
1269 #endif
1270 return crc;
1273 static int check_version(Elf_Shdr *sechdrs,
1274 unsigned int versindex,
1275 const char *symname,
1276 struct module *mod,
1277 const unsigned long *crc,
1278 const struct module *crc_owner)
1280 unsigned int i, num_versions;
1281 struct modversion_info *versions;
1283 /* Exporting module didn't supply crcs? OK, we're already tainted. */
1284 if (!crc)
1285 return 1;
1287 /* No versions at all? modprobe --force does this. */
1288 if (versindex == 0)
1289 return try_to_force_load(mod, symname) == 0;
1291 versions = (void *) sechdrs[versindex].sh_addr;
1292 num_versions = sechdrs[versindex].sh_size
1293 / sizeof(struct modversion_info);
1295 for (i = 0; i < num_versions; i++) {
1296 if (strcmp(versions[i].name, symname) != 0)
1297 continue;
1299 if (versions[i].crc == maybe_relocated(*crc, crc_owner))
1300 return 1;
1301 pr_debug("Found checksum %lX vs module %lX\n",
1302 maybe_relocated(*crc, crc_owner), versions[i].crc);
1303 goto bad_version;
1306 /* Broken toolchain. Warn once, then let it go.. */
1307 pr_warn_once("%s: no symbol version for %s\n", mod->name, symname);
1308 return 1;
1310 bad_version:
1311 pr_warn("%s: disagrees about version of symbol %s\n",
1312 mod->name, symname);
1313 return 0;
1316 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1317 unsigned int versindex,
1318 struct module *mod)
1320 const unsigned long *crc;
1323 * Since this should be found in kernel (which can't be removed), no
1324 * locking is necessary -- use preempt_disable() to placate lockdep.
1326 preempt_disable();
1327 if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
1328 &crc, true, false)) {
1329 preempt_enable();
1330 BUG();
1332 preempt_enable();
1333 return check_version(sechdrs, versindex,
1334 VMLINUX_SYMBOL_STR(module_layout), mod, crc,
1335 NULL);
1338 /* First part is kernel version, which we ignore if module has crcs. */
1339 static inline int same_magic(const char *amagic, const char *bmagic,
1340 bool has_crcs)
1342 if (has_crcs) {
1343 amagic += strcspn(amagic, " ");
1344 bmagic += strcspn(bmagic, " ");
1346 return strcmp(amagic, bmagic) == 0;
1348 #else
1349 static inline int check_version(Elf_Shdr *sechdrs,
1350 unsigned int versindex,
1351 const char *symname,
1352 struct module *mod,
1353 const unsigned long *crc,
1354 const struct module *crc_owner)
1356 return 1;
1359 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1360 unsigned int versindex,
1361 struct module *mod)
1363 return 1;
1366 static inline int same_magic(const char *amagic, const char *bmagic,
1367 bool has_crcs)
1369 return strcmp(amagic, bmagic) == 0;
1371 #endif /* CONFIG_MODVERSIONS */
1373 /* Resolve a symbol for this module. I.e. if we find one, record usage. */
1374 static const struct kernel_symbol *resolve_symbol(struct module *mod,
1375 const struct load_info *info,
1376 const char *name,
1377 char ownername[])
1379 struct module *owner;
1380 const struct kernel_symbol *sym;
1381 const unsigned long *crc;
1382 int err;
1385 * The module_mutex should not be a heavily contended lock;
1386 * if we get the occasional sleep here, we'll go an extra iteration
1387 * in the wait_event_interruptible(), which is harmless.
1389 sched_annotate_sleep();
1390 mutex_lock(&module_mutex);
1391 sym = find_symbol(name, &owner, &crc,
1392 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1393 if (!sym)
1394 goto unlock;
1396 if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
1397 owner)) {
1398 sym = ERR_PTR(-EINVAL);
1399 goto getname;
1402 err = ref_module(mod, owner);
1403 if (err) {
1404 sym = ERR_PTR(err);
1405 goto getname;
1408 getname:
1409 /* We must make copy under the lock if we failed to get ref. */
1410 strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1411 unlock:
1412 mutex_unlock(&module_mutex);
1413 return sym;
1416 static const struct kernel_symbol *
1417 resolve_symbol_wait(struct module *mod,
1418 const struct load_info *info,
1419 const char *name)
1421 const struct kernel_symbol *ksym;
1422 char owner[MODULE_NAME_LEN];
1424 if (wait_event_interruptible_timeout(module_wq,
1425 !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1426 || PTR_ERR(ksym) != -EBUSY,
1427 30 * HZ) <= 0) {
1428 pr_warn("%s: gave up waiting for init of module %s.\n",
1429 mod->name, owner);
1431 return ksym;
1435 * /sys/module/foo/sections stuff
1436 * J. Corbet <corbet@lwn.net>
1438 #ifdef CONFIG_SYSFS
1440 #ifdef CONFIG_KALLSYMS
1441 static inline bool sect_empty(const Elf_Shdr *sect)
1443 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1446 struct module_sect_attr {
1447 struct module_attribute mattr;
1448 char *name;
1449 unsigned long address;
1452 struct module_sect_attrs {
1453 struct attribute_group grp;
1454 unsigned int nsections;
1455 struct module_sect_attr attrs[0];
1458 static ssize_t module_sect_show(struct module_attribute *mattr,
1459 struct module_kobject *mk, char *buf)
1461 struct module_sect_attr *sattr =
1462 container_of(mattr, struct module_sect_attr, mattr);
1463 return sprintf(buf, "0x%pK\n", (void *)sattr->address);
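/*
 * Net effect (illustrative): every loaded, non-empty section gets a file
 * such as /sys/module/<name>/sections/.text containing the section's
 * load address; since it is printed with %pK, unprivileged readers may
 * see 0 depending on kptr_restrict.
 */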
1466 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1468 unsigned int section;
1470 for (section = 0; section < sect_attrs->nsections; section++)
1471 kfree(sect_attrs->attrs[section].name);
1472 kfree(sect_attrs);
1475 static void add_sect_attrs(struct module *mod, const struct load_info *info)
1477 unsigned int nloaded = 0, i, size[2];
1478 struct module_sect_attrs *sect_attrs;
1479 struct module_sect_attr *sattr;
1480 struct attribute **gattr;
1482 /* Count loaded sections and allocate structures */
1483 for (i = 0; i < info->hdr->e_shnum; i++)
1484 if (!sect_empty(&info->sechdrs[i]))
1485 nloaded++;
1486 size[0] = ALIGN(sizeof(*sect_attrs)
1487 + nloaded * sizeof(sect_attrs->attrs[0]),
1488 sizeof(sect_attrs->grp.attrs[0]));
1489 size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
1490 sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1491 if (sect_attrs == NULL)
1492 return;
1494 /* Setup section attributes. */
1495 sect_attrs->grp.name = "sections";
1496 sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
1498 sect_attrs->nsections = 0;
1499 sattr = &sect_attrs->attrs[0];
1500 gattr = &sect_attrs->grp.attrs[0];
1501 for (i = 0; i < info->hdr->e_shnum; i++) {
1502 Elf_Shdr *sec = &info->sechdrs[i];
1503 if (sect_empty(sec))
1504 continue;
1505 sattr->address = sec->sh_addr;
1506 sattr->name = kstrdup(info->secstrings + sec->sh_name,
1507 GFP_KERNEL);
1508 if (sattr->name == NULL)
1509 goto out;
1510 sect_attrs->nsections++;
1511 sysfs_attr_init(&sattr->mattr.attr);
1512 sattr->mattr.show = module_sect_show;
1513 sattr->mattr.store = NULL;
1514 sattr->mattr.attr.name = sattr->name;
1515 sattr->mattr.attr.mode = S_IRUGO;
1516 *(gattr++) = &(sattr++)->mattr.attr;
1518 *gattr = NULL;
1520 if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1521 goto out;
1523 mod->sect_attrs = sect_attrs;
1524 return;
1525 out:
1526 free_sect_attrs(sect_attrs);
1529 static void remove_sect_attrs(struct module *mod)
1531 if (mod->sect_attrs) {
1532 sysfs_remove_group(&mod->mkobj.kobj,
1533 &mod->sect_attrs->grp);
1534 /* We are positive that no one is using any sect attrs
1535 * at this point. Deallocate immediately. */
1536 free_sect_attrs(mod->sect_attrs);
1537 mod->sect_attrs = NULL;
1542 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
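/*
 * For example, a module linked with --build-id exposes
 * /sys/module/<name>/notes/.note.gnu.build-id, which userspace tools
 * such as perf and debuggers read to match the module against separate
 * debuginfo (illustrative note).
 */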
1545 struct module_notes_attrs {
1546 struct kobject *dir;
1547 unsigned int notes;
1548 struct bin_attribute attrs[0];
1551 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1552 struct bin_attribute *bin_attr,
1553 char *buf, loff_t pos, size_t count)
1556 * The caller checked the pos and count against our size.
1558 memcpy(buf, bin_attr->private + pos, count);
1559 return count;
1562 static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1563 unsigned int i)
1565 if (notes_attrs->dir) {
1566 while (i-- > 0)
1567 sysfs_remove_bin_file(notes_attrs->dir,
1568 &notes_attrs->attrs[i]);
1569 kobject_put(notes_attrs->dir);
1571 kfree(notes_attrs);
1574 static void add_notes_attrs(struct module *mod, const struct load_info *info)
1576 unsigned int notes, loaded, i;
1577 struct module_notes_attrs *notes_attrs;
1578 struct bin_attribute *nattr;
1580 /* failed to create section attributes, so can't create notes */
1581 if (!mod->sect_attrs)
1582 return;
1584 /* Count notes sections and allocate structures. */
1585 notes = 0;
1586 for (i = 0; i < info->hdr->e_shnum; i++)
1587 if (!sect_empty(&info->sechdrs[i]) &&
1588 (info->sechdrs[i].sh_type == SHT_NOTE))
1589 ++notes;
1591 if (notes == 0)
1592 return;
1594 notes_attrs = kzalloc(sizeof(*notes_attrs)
1595 + notes * sizeof(notes_attrs->attrs[0]),
1596 GFP_KERNEL);
1597 if (notes_attrs == NULL)
1598 return;
1600 notes_attrs->notes = notes;
1601 nattr = &notes_attrs->attrs[0];
1602 for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1603 if (sect_empty(&info->sechdrs[i]))
1604 continue;
1605 if (info->sechdrs[i].sh_type == SHT_NOTE) {
1606 sysfs_bin_attr_init(nattr);
1607 nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
1608 nattr->attr.mode = S_IRUGO;
1609 nattr->size = info->sechdrs[i].sh_size;
1610 nattr->private = (void *) info->sechdrs[i].sh_addr;
1611 nattr->read = module_notes_read;
1612 ++nattr;
1614 ++loaded;
1617 notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1618 if (!notes_attrs->dir)
1619 goto out;
1621 for (i = 0; i < notes; ++i)
1622 if (sysfs_create_bin_file(notes_attrs->dir,
1623 &notes_attrs->attrs[i]))
1624 goto out;
1626 mod->notes_attrs = notes_attrs;
1627 return;
1629 out:
1630 free_notes_attrs(notes_attrs, i);
1633 static void remove_notes_attrs(struct module *mod)
1635 if (mod->notes_attrs)
1636 free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1639 #else
1641 static inline void add_sect_attrs(struct module *mod,
1642 const struct load_info *info)
1646 static inline void remove_sect_attrs(struct module *mod)
1650 static inline void add_notes_attrs(struct module *mod,
1651 const struct load_info *info)
1655 static inline void remove_notes_attrs(struct module *mod)
1658 #endif /* CONFIG_KALLSYMS */
1660 static void add_usage_links(struct module *mod)
1662 #ifdef CONFIG_MODULE_UNLOAD
1663 struct module_use *use;
1664 int nowarn;
1666 mutex_lock(&module_mutex);
1667 list_for_each_entry(use, &mod->target_list, target_list) {
1668 nowarn = sysfs_create_link(use->target->holders_dir,
1669 &mod->mkobj.kobj, mod->name);
1671 mutex_unlock(&module_mutex);
1672 #endif
1675 static void del_usage_links(struct module *mod)
1677 #ifdef CONFIG_MODULE_UNLOAD
1678 struct module_use *use;
1680 mutex_lock(&module_mutex);
1681 list_for_each_entry(use, &mod->target_list, target_list)
1682 sysfs_remove_link(use->target->holders_dir, mod->name);
1683 mutex_unlock(&module_mutex);
1684 #endif
1687 static int module_add_modinfo_attrs(struct module *mod)
1689 struct module_attribute *attr;
1690 struct module_attribute *temp_attr;
1691 int error = 0;
1692 int i;
1694 mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1695 (ARRAY_SIZE(modinfo_attrs) + 1)),
1696 GFP_KERNEL);
1697 if (!mod->modinfo_attrs)
1698 return -ENOMEM;
1700 temp_attr = mod->modinfo_attrs;
1701 for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
1702 if (!attr->test || attr->test(mod)) {
1703 memcpy(temp_attr, attr, sizeof(*temp_attr));
1704 sysfs_attr_init(&temp_attr->attr);
1705 error = sysfs_create_file(&mod->mkobj.kobj,
1706 &temp_attr->attr);
1707 ++temp_attr;
1710 return error;
1713 static void module_remove_modinfo_attrs(struct module *mod)
1715 struct module_attribute *attr;
1716 int i;
1718 for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1719 /* pick a field to test for end of list */
1720 if (!attr->attr.name)
1721 break;
1722 sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
1723 if (attr->free)
1724 attr->free(mod);
1726 kfree(mod->modinfo_attrs);
1729 static void mod_kobject_put(struct module *mod)
1731 DECLARE_COMPLETION_ONSTACK(c);
1732 mod->mkobj.kobj_completion = &c;
1733 kobject_put(&mod->mkobj.kobj);
1734 wait_for_completion(&c);
1737 static int mod_sysfs_init(struct module *mod)
1739 int err;
1740 struct kobject *kobj;
1742 if (!module_sysfs_initialized) {
1743 pr_err("%s: module sysfs not initialized\n", mod->name);
1744 err = -EINVAL;
1745 goto out;
1748 kobj = kset_find_obj(module_kset, mod->name);
1749 if (kobj) {
1750 pr_err("%s: module is already loaded\n", mod->name);
1751 kobject_put(kobj);
1752 err = -EINVAL;
1753 goto out;
1756 mod->mkobj.mod = mod;
1758 memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1759 mod->mkobj.kobj.kset = module_kset;
1760 err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1761 "%s", mod->name);
1762 if (err)
1763 mod_kobject_put(mod);
1765 /* delay uevent until full sysfs population */
1766 out:
1767 return err;
1770 static int mod_sysfs_setup(struct module *mod,
1771 const struct load_info *info,
1772 struct kernel_param *kparam,
1773 unsigned int num_params)
1775 int err;
1777 err = mod_sysfs_init(mod);
1778 if (err)
1779 goto out;
1781 mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1782 if (!mod->holders_dir) {
1783 err = -ENOMEM;
1784 goto out_unreg;
1787 err = module_param_sysfs_setup(mod, kparam, num_params);
1788 if (err)
1789 goto out_unreg_holders;
1791 err = module_add_modinfo_attrs(mod);
1792 if (err)
1793 goto out_unreg_param;
1795 add_usage_links(mod);
1796 add_sect_attrs(mod, info);
1797 add_notes_attrs(mod, info);
1799 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1800 return 0;
1802 out_unreg_param:
1803 module_param_sysfs_remove(mod);
1804 out_unreg_holders:
1805 kobject_put(mod->holders_dir);
1806 out_unreg:
1807 mod_kobject_put(mod);
1808 out:
1809 return err;
1812 static void mod_sysfs_fini(struct module *mod)
1814 remove_notes_attrs(mod);
1815 remove_sect_attrs(mod);
1816 mod_kobject_put(mod);
1819 static void init_param_lock(struct module *mod)
1821 mutex_init(&mod->param_lock);
1823 #else /* !CONFIG_SYSFS */
1825 static int mod_sysfs_setup(struct module *mod,
1826 const struct load_info *info,
1827 struct kernel_param *kparam,
1828 unsigned int num_params)
1830 return 0;
1833 static void mod_sysfs_fini(struct module *mod)
1837 static void module_remove_modinfo_attrs(struct module *mod)
1841 static void del_usage_links(struct module *mod)
1845 static void init_param_lock(struct module *mod)
1848 #endif /* CONFIG_SYSFS */
1850 static void mod_sysfs_teardown(struct module *mod)
1852 del_usage_links(mod);
1853 module_remove_modinfo_attrs(mod);
1854 module_param_sysfs_remove(mod);
1855 kobject_put(mod->mkobj.drivers_dir);
1856 kobject_put(mod->holders_dir);
1857 mod_sysfs_fini(mod);
1860 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
1861 /*
1862 * LKM RO/NX protection: protect module's text/ro-data
1863 * from modification and any data from execution.
1865 * General layout of module is:
1866 *          [text] [read-only-data] [ro-after-init] [writable data]
1867 * text_size -----^                ^               ^               ^
1868 * ro_size ------------------------|               |               |
1869 * ro_after_init_size -----------------------------|               |
1870 * size -----------------------------------------------------------|
1872 * These values are always page-aligned (as is base)
1873 */
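/*
 * A concrete (hypothetical) instance: with 4K pages, text_size = 12K,
 * ro_size = 16K, ro_after_init_size = 20K and size = 24K, frob_text()
 * below covers pages 0-2, frob_rodata() page 3, frob_ro_after_init()
 * page 4 and frob_writable_data() page 5.
 */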
1874 static void frob_text(const struct module_layout *layout,
1875 int (*set_memory)(unsigned long start, int num_pages))
1877 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1878 BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
1879 set_memory((unsigned long)layout->base,
1880 layout->text_size >> PAGE_SHIFT);
1883 static void frob_rodata(const struct module_layout *layout,
1884 int (*set_memory)(unsigned long start, int num_pages))
1886 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1887 BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
1888 BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
1889 set_memory((unsigned long)layout->base + layout->text_size,
1890 (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
1893 static void frob_ro_after_init(const struct module_layout *layout,
1894 int (*set_memory)(unsigned long start, int num_pages))
1896 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1897 BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
1898 BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
1899 set_memory((unsigned long)layout->base + layout->ro_size,
1900 (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
1903 static void frob_writable_data(const struct module_layout *layout,
1904 int (*set_memory)(unsigned long start, int num_pages))
1906 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1907 BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
1908 BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1));
1909 set_memory((unsigned long)layout->base + layout->ro_after_init_size,
1910 (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
1913 /* livepatching wants to disable read-only so it can frob module. */
1914 void module_disable_ro(const struct module *mod)
1916 if (!rodata_enabled)
1917 return;
1919 frob_text(&mod->core_layout, set_memory_rw);
1920 frob_rodata(&mod->core_layout, set_memory_rw);
1921 frob_ro_after_init(&mod->core_layout, set_memory_rw);
1922 frob_text(&mod->init_layout, set_memory_rw);
1923 frob_rodata(&mod->init_layout, set_memory_rw);
1926 void module_enable_ro(const struct module *mod, bool after_init)
1928 if (!rodata_enabled)
1929 return;
1931 frob_text(&mod->core_layout, set_memory_ro);
1932 frob_rodata(&mod->core_layout, set_memory_ro);
1933 frob_text(&mod->init_layout, set_memory_ro);
1934 frob_rodata(&mod->init_layout, set_memory_ro);
1936 if (after_init)
1937 frob_ro_after_init(&mod->core_layout, set_memory_ro);
1940 static void module_enable_nx(const struct module *mod)
1942 frob_rodata(&mod->core_layout, set_memory_nx);
1943 frob_ro_after_init(&mod->core_layout, set_memory_nx);
1944 frob_writable_data(&mod->core_layout, set_memory_nx);
1945 frob_rodata(&mod->init_layout, set_memory_nx);
1946 frob_writable_data(&mod->init_layout, set_memory_nx);
1949 static void module_disable_nx(const struct module *mod)
1951 frob_rodata(&mod->core_layout, set_memory_x);
1952 frob_ro_after_init(&mod->core_layout, set_memory_x);
1953 frob_writable_data(&mod->core_layout, set_memory_x);
1954 frob_rodata(&mod->init_layout, set_memory_x);
1955 frob_writable_data(&mod->init_layout, set_memory_x);
1958 /* Iterate through all modules and set each module's text as RW */
1959 void set_all_modules_text_rw(void)
1961 struct module *mod;
1963 if (!rodata_enabled)
1964 return;
1966 mutex_lock(&module_mutex);
1967 list_for_each_entry_rcu(mod, &modules, list) {
1968 if (mod->state == MODULE_STATE_UNFORMED)
1969 continue;
1971 frob_text(&mod->core_layout, set_memory_rw);
1972 frob_text(&mod->init_layout, set_memory_rw);
1974 mutex_unlock(&module_mutex);
1977 /* Iterate through all modules and set each module's text as RO */
1978 void set_all_modules_text_ro(void)
1980 struct module *mod;
1982 if (!rodata_enabled)
1983 return;
1985 mutex_lock(&module_mutex);
1986 list_for_each_entry_rcu(mod, &modules, list) {
1987 if (mod->state == MODULE_STATE_UNFORMED)
1988 continue;
1990 frob_text(&mod->core_layout, set_memory_ro);
1991 frob_text(&mod->init_layout, set_memory_ro);
1993 mutex_unlock(&module_mutex);
1996 static void disable_ro_nx(const struct module_layout *layout)
1998 if (rodata_enabled) {
1999 frob_text(layout, set_memory_rw);
2000 frob_rodata(layout, set_memory_rw);
2001 frob_ro_after_init(layout, set_memory_rw);
2003 frob_rodata(layout, set_memory_x);
2004 frob_ro_after_init(layout, set_memory_x);
2005 frob_writable_data(layout, set_memory_x);
2008 #else
2009 static void disable_ro_nx(const struct module_layout *layout) { }
2010 static void module_enable_nx(const struct module *mod) { }
2011 static void module_disable_nx(const struct module *mod) { }
2012 #endif
2014 #ifdef CONFIG_LIVEPATCH
2016 * Persist Elf information about a module. Copy the Elf header,
2017 * section header table, section string table, and symtab section
2018 * index from info to mod->klp_info.
2020 static int copy_module_elf(struct module *mod, struct load_info *info)
2022 unsigned int size, symndx;
2023 int ret;
2025 size = sizeof(*mod->klp_info);
2026 mod->klp_info = kmalloc(size, GFP_KERNEL);
2027 if (mod->klp_info == NULL)
2028 return -ENOMEM;
2030 /* Elf header */
2031 size = sizeof(mod->klp_info->hdr);
2032 memcpy(&mod->klp_info->hdr, info->hdr, size);
2034 /* Elf section header table */
2035 size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
2036 mod->klp_info->sechdrs = kmalloc(size, GFP_KERNEL);
2037 if (mod->klp_info->sechdrs == NULL) {
2038 ret = -ENOMEM;
2039 goto free_info;
2041 memcpy(mod->klp_info->sechdrs, info->sechdrs, size);
2043 /* Elf section name string table */
2044 size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
2045 mod->klp_info->secstrings = kmalloc(size, GFP_KERNEL);
2046 if (mod->klp_info->secstrings == NULL) {
2047 ret = -ENOMEM;
2048 goto free_sechdrs;
2050 memcpy(mod->klp_info->secstrings, info->secstrings, size);
2052 /* Elf symbol section index */
2053 symndx = info->index.sym;
2054 mod->klp_info->symndx = symndx;
2057 * For livepatch modules, core_kallsyms.symtab is a complete
2058 * copy of the original symbol table. Adjust sh_addr to point
2059 * to core_kallsyms.symtab since the copy of the symtab in module
2060 * init memory is freed at the end of do_init_module().
2062 mod->klp_info->sechdrs[symndx].sh_addr = \
2063 (unsigned long) mod->core_kallsyms.symtab;
2065 return 0;
2067 free_sechdrs:
2068 kfree(mod->klp_info->sechdrs);
2069 free_info:
2070 kfree(mod->klp_info);
2071 return ret;
2074 static void free_module_elf(struct module *mod)
2076 kfree(mod->klp_info->sechdrs);
2077 kfree(mod->klp_info->secstrings);
2078 kfree(mod->klp_info);
2080 #else /* !CONFIG_LIVEPATCH */
2081 static int copy_module_elf(struct module *mod, struct load_info *info)
2083 return 0;
2086 static void free_module_elf(struct module *mod)
2089 #endif /* CONFIG_LIVEPATCH */
2091 void __weak module_memfree(void *module_region)
2093 vfree(module_region);
2096 void __weak module_arch_cleanup(struct module *mod)
2100 void __weak module_arch_freeing_init(struct module *mod)
2104 /* Free a module, remove from lists, etc. */
2105 static void free_module(struct module *mod)
2107 trace_module_free(mod);
2109 mod_sysfs_teardown(mod);
2111 /* We leave it in list to prevent duplicate loads, but make sure
2112 * that no one uses it while it's being deconstructed. */
2113 mutex_lock(&module_mutex);
2114 mod->state = MODULE_STATE_UNFORMED;
2115 mutex_unlock(&module_mutex);
2117 /* Remove dynamic debug info */
2118 ddebug_remove_module(mod->name);
2120 /* Arch-specific cleanup. */
2121 module_arch_cleanup(mod);
2123 /* Module unload stuff */
2124 module_unload_free(mod);
2126 /* Free any allocated parameters. */
2127 destroy_params(mod->kp, mod->num_kp);
2129 if (is_livepatch_module(mod))
2130 free_module_elf(mod);
2132 /* Now we can delete it from the lists */
2133 mutex_lock(&module_mutex);
2134 /* Unlink carefully: kallsyms could be walking list. */
2135 list_del_rcu(&mod->list);
2136 mod_tree_remove(mod);
2137 /* Remove this module from bug list, this uses list_del_rcu */
2138 module_bug_cleanup(mod);
2139 /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
2140 synchronize_sched();
2141 mutex_unlock(&module_mutex);
2143 /* This may be empty, but that's OK */
2144 disable_ro_nx(&mod->init_layout);
2145 module_arch_freeing_init(mod);
2146 module_memfree(mod->init_layout.base);
2147 kfree(mod->args);
2148 percpu_modfree(mod);
2150 /* Free lock-classes; relies on the preceding sync_rcu(). */
2151 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
2153 /* Finally, free the core (containing the module structure) */
2154 disable_ro_nx(&mod->core_layout);
2155 module_memfree(mod->core_layout.base);
2157 #ifdef CONFIG_MPU
2158 update_protections(current->mm);
2159 #endif
2162 void *__symbol_get(const char *symbol)
2164 struct module *owner;
2165 const struct kernel_symbol *sym;
2167 preempt_disable();
2168 sym = find_symbol(symbol, &owner, NULL, true, true);
2169 if (sym && strong_try_module_get(owner))
2170 sym = NULL;
2171 preempt_enable();
2173 return sym ? (void *)sym->value : NULL;
2175 EXPORT_SYMBOL_GPL(__symbol_get);
2178 * Ensure that an exported symbol [global namespace] does not already exist
2179 * in the kernel or in some other module's exported symbol table.
2181 * You must hold the module_mutex.
2183 static int verify_export_symbols(struct module *mod)
2185 unsigned int i;
2186 struct module *owner;
2187 const struct kernel_symbol *s;
2188 struct {
2189 const struct kernel_symbol *sym;
2190 unsigned int num;
2191 } arr[] = {
2192 { mod->syms, mod->num_syms },
2193 { mod->gpl_syms, mod->num_gpl_syms },
2194 { mod->gpl_future_syms, mod->num_gpl_future_syms },
2195 #ifdef CONFIG_UNUSED_SYMBOLS
2196 { mod->unused_syms, mod->num_unused_syms },
2197 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
2198 #endif
2201 for (i = 0; i < ARRAY_SIZE(arr); i++) {
2202 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
2203 if (find_symbol(s->name, &owner, NULL, true, false)) {
2204 pr_err("%s: exports duplicate symbol %s"
2205 " (owned by %s)\n",
2206 mod->name, s->name, module_name(owner));
2207 return -ENOEXEC;
2211 return 0;
2214 /* Change all symbols so that st_value encodes the pointer directly. */
2215 static int simplify_symbols(struct module *mod, const struct load_info *info)
2217 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2218 Elf_Sym *sym = (void *)symsec->sh_addr;
2219 unsigned long secbase;
2220 unsigned int i;
2221 int ret = 0;
2222 const struct kernel_symbol *ksym;
2224 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
2225 const char *name = info->strtab + sym[i].st_name;
2227 switch (sym[i].st_shndx) {
2228 case SHN_COMMON:
2229 /* Ignore common symbols */
2230 if (!strncmp(name, "__gnu_lto", 9))
2231 break;
2233 /* We compiled with -fno-common. These are not
2234 supposed to happen. */
2235 pr_debug("Common symbol: %s\n", name);
2236 pr_warn("%s: please compile with -fno-common\n",
2237 mod->name);
2238 ret = -ENOEXEC;
2239 break;
2241 case SHN_ABS:
2242 /* Don't need to do anything */
2243 pr_debug("Absolute symbol: 0x%08lx\n",
2244 (long)sym[i].st_value);
2245 break;
2247 case SHN_LIVEPATCH:
2248 /* Livepatch symbols are resolved by livepatch */
2249 break;
2251 case SHN_UNDEF:
2252 ksym = resolve_symbol_wait(mod, info, name);
2253 /* Ok if resolved. */
2254 if (ksym && !IS_ERR(ksym)) {
2255 sym[i].st_value = ksym->value;
2256 break;
2259 /* Ok if weak. */
2260 if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
2261 break;
2263 pr_warn("%s: Unknown symbol %s (err %li)\n",
2264 mod->name, name, PTR_ERR(ksym));
2265 ret = PTR_ERR(ksym) ?: -ENOENT;
2266 break;
2268 default:
2269 /* Divert to percpu allocation if a percpu var. */
2270 if (sym[i].st_shndx == info->index.pcpu)
2271 secbase = (unsigned long)mod_percpu(mod);
2272 else
2273 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
2274 sym[i].st_value += secbase;
2275 break;
2279 return ret;
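/*
 * A small illustration of the SHN_UNDEF case above (values are made up,
 * not taken from a real module): a module that calls printk starts out
 * with a symtab entry of roughly
 *
 *	st_shndx = SHN_UNDEF, st_value = 0, name = "printk"
 *
 * resolve_symbol_wait() locates the exporter and st_value is rewritten
 * to the exported address, so later relocations can treat the entry
 * just like a locally defined symbol.
 */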
2282 static int apply_relocations(struct module *mod, const struct load_info *info)
2284 unsigned int i;
2285 int err = 0;
2287 /* Now do relocations. */
2288 for (i = 1; i < info->hdr->e_shnum; i++) {
2289 unsigned int infosec = info->sechdrs[i].sh_info;
2291 /* Not a valid relocation section? */
2292 if (infosec >= info->hdr->e_shnum)
2293 continue;
2295 /* Don't bother with non-allocated sections */
2296 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2297 continue;
2299 /* Livepatch relocation sections are applied by livepatch */
2300 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
2301 continue;
2303 if (info->sechdrs[i].sh_type == SHT_REL)
2304 err = apply_relocate(info->sechdrs, info->strtab,
2305 info->index.sym, i, mod);
2306 else if (info->sechdrs[i].sh_type == SHT_RELA)
2307 err = apply_relocate_add(info->sechdrs, info->strtab,
2308 info->index.sym, i, mod);
2309 if (err < 0)
2310 break;
2312 return err;
2315 /* Additional bytes needed by arch in front of individual sections */
2316 unsigned int __weak arch_mod_section_prepend(struct module *mod,
2317 unsigned int section)
2319 /* default implementation just returns zero */
2320 return 0;
2323 /* Update size with this section: return offset. */
2324 static long get_offset(struct module *mod, unsigned int *size,
2325 Elf_Shdr *sechdr, unsigned int section)
2327 long ret;
2329 *size += arch_mod_section_prepend(mod, section);
2330 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2331 *size = ret + sechdr->sh_size;
2332 return ret;
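/*
 * A worked example of the arithmetic above, assuming *size == 100, no
 * arch prepend and sh_addralign == 32 (illustrative numbers only):
 *
 *	ret   = ALIGN(100, 32) = 128;	<- offset returned for this section
 *	*size = 128 + sh_size;		<- running total now covers it
 *
 * Every section therefore starts at the next suitably aligned offset,
 * and the running size always points just past the last section laid out.
 */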
2335 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2336 might -- code, read-only data, read-write data, small data. Tally
2337 sizes, and place the offsets into sh_entsize fields: high bit means it
2338 belongs in init. */
2339 static void layout_sections(struct module *mod, struct load_info *info)
2341 static unsigned long const masks[][2] = {
2342 /* NOTE: all executable code must be the first section
2343 * in this array; otherwise modify the text_size
2344 * finder in the two loops below */
2345 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2346 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2347 { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
2348 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2349 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2351 unsigned int m, i;
2353 for (i = 0; i < info->hdr->e_shnum; i++)
2354 info->sechdrs[i].sh_entsize = ~0UL;
2356 pr_debug("Core section allocation order:\n");
2357 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2358 for (i = 0; i < info->hdr->e_shnum; ++i) {
2359 Elf_Shdr *s = &info->sechdrs[i];
2360 const char *sname = info->secstrings + s->sh_name;
2362 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2363 || (s->sh_flags & masks[m][1])
2364 || s->sh_entsize != ~0UL
2365 || strstarts(sname, ".init"))
2366 continue;
2367 s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
2368 pr_debug("\t%s\n", sname);
2370 switch (m) {
2371 case 0: /* executable */
2372 mod->core_layout.size = debug_align(mod->core_layout.size);
2373 mod->core_layout.text_size = mod->core_layout.size;
2374 break;
2375 case 1: /* RO: text and ro-data */
2376 mod->core_layout.size = debug_align(mod->core_layout.size);
2377 mod->core_layout.ro_size = mod->core_layout.size;
2378 break;
2379 case 2: /* RO after init */
2380 mod->core_layout.size = debug_align(mod->core_layout.size);
2381 mod->core_layout.ro_after_init_size = mod->core_layout.size;
2382 break;
2383 case 4: /* whole core */
2384 mod->core_layout.size = debug_align(mod->core_layout.size);
2385 break;
2389 pr_debug("Init section allocation order:\n");
2390 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2391 for (i = 0; i < info->hdr->e_shnum; ++i) {
2392 Elf_Shdr *s = &info->sechdrs[i];
2393 const char *sname = info->secstrings + s->sh_name;
2395 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2396 || (s->sh_flags & masks[m][1])
2397 || s->sh_entsize != ~0UL
2398 || !strstarts(sname, ".init"))
2399 continue;
2400 s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
2401 | INIT_OFFSET_MASK);
2402 pr_debug("\t%s\n", sname);
2404 switch (m) {
2405 case 0: /* executable */
2406 mod->init_layout.size = debug_align(mod->init_layout.size);
2407 mod->init_layout.text_size = mod->init_layout.size;
2408 break;
2409 case 1: /* RO: text and ro-data */
2410 mod->init_layout.size = debug_align(mod->init_layout.size);
2411 mod->init_layout.ro_size = mod->init_layout.size;
2412 break;
2413 case 2:
2415 * RO after init doesn't apply to init_layout (only
2416 * core_layout), so it just takes the value of ro_size.
2418 mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
2419 break;
2420 case 4: /* whole init */
2421 mod->init_layout.size = debug_align(mod->init_layout.size);
2422 break;
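/*
 * An illustrative (not measured) core_layout after the loops above, with
 * CONFIG_DEBUG_SET_MODULE_RONX=y so debug_align() rounds each class up
 * to a page boundary:
 *
 *	[ .text ...                ]	<- core_layout.text_size
 *	[ .rodata ...              ]	<- core_layout.ro_size
 *	[ .data..ro_after_init     ]	<- core_layout.ro_after_init_size
 *	[ .data, .bss, small data  ]	<- core_layout.size
 *
 * The boundaries are monotonically non-decreasing, which is what lets
 * module_enable_ro()/module_enable_nx() protect whole ranges later.
 */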
2427 static void set_license(struct module *mod, const char *license)
2429 if (!license)
2430 license = "unspecified";
2432 if (!license_is_gpl_compatible(license)) {
2433 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2434 pr_warn("%s: module license '%s' taints kernel.\n",
2435 mod->name, license);
2436 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2437 LOCKDEP_NOW_UNRELIABLE);
2441 /* Parse tag=value strings from .modinfo section */
2442 static char *next_string(char *string, unsigned long *secsize)
2444 /* Skip non-zero chars */
2445 while (string[0]) {
2446 string++;
2447 if ((*secsize)-- <= 1)
2448 return NULL;
2451 /* Skip any zero padding. */
2452 while (!string[0]) {
2453 string++;
2454 if ((*secsize)-- <= 1)
2455 return NULL;
2457 return string;
2460 static char *get_modinfo(struct load_info *info, const char *tag)
2462 char *p;
2463 unsigned int taglen = strlen(tag);
2464 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2465 unsigned long size = infosec->sh_size;
2467 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2468 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2469 return p + taglen + 1;
2471 return NULL;
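/*
 * A sketch of what .modinfo typically contains and how get_modinfo()
 * walks it (contents are illustrative):
 *
 *	"license=GPL\0author=Jane Doe\0vermagic=4.9.0 SMP mod_unload \0"
 *
 * get_modinfo(info, "license") matches "license" followed by '=' and
 * returns a pointer to "GPL"; next_string() then skips the terminating
 * NUL (and any padding) to reach the next tag=value pair.
 */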
2474 static void setup_modinfo(struct module *mod, struct load_info *info)
2476 struct module_attribute *attr;
2477 int i;
2479 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2480 if (attr->setup)
2481 attr->setup(mod, get_modinfo(info, attr->attr.name));
2485 static void free_modinfo(struct module *mod)
2487 struct module_attribute *attr;
2488 int i;
2490 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2491 if (attr->free)
2492 attr->free(mod);
2496 #ifdef CONFIG_KALLSYMS
2498 /* lookup symbol in given range of kernel_symbols */
2499 static const struct kernel_symbol *lookup_symbol(const char *name,
2500 const struct kernel_symbol *start,
2501 const struct kernel_symbol *stop)
2503 return bsearch(name, start, stop - start,
2504 sizeof(struct kernel_symbol), cmp_name);
2507 static int is_exported(const char *name, unsigned long value,
2508 const struct module *mod)
2510 const struct kernel_symbol *ks;
2511 if (!mod)
2512 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2513 else
2514 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2515 return ks != NULL && ks->value == value;
2518 /* As per nm */
2519 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2521 const Elf_Shdr *sechdrs = info->sechdrs;
2523 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2524 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2525 return 'v';
2526 else
2527 return 'w';
2529 if (sym->st_shndx == SHN_UNDEF)
2530 return 'U';
2531 if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
2532 return 'a';
2533 if (sym->st_shndx >= SHN_LORESERVE)
2534 return '?';
2535 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2536 return 't';
2537 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2538 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2539 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2540 return 'r';
2541 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2542 return 'g';
2543 else
2544 return 'd';
2546 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2547 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2548 return 's';
2549 else
2550 return 'b';
2552 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2553 ".debug")) {
2554 return 'n';
2556 return '?';
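/*
 * The letters above follow nm(1) conventions, for example:
 *
 *	't' text (SHF_EXECINSTR)	'r' read-only data
 *	'd' writable data		'b' zero-initialized data (SHT_NOBITS)
 *	'U' undefined			'w'/'v' weak symbol / weak object
 *
 * so module entries in /proc/kallsyms read like nm output for the
 * original object file.
 */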
2559 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2560 unsigned int shnum, unsigned int pcpundx)
2562 const Elf_Shdr *sec;
2564 if (src->st_shndx == SHN_UNDEF
2565 || src->st_shndx >= shnum
2566 || !src->st_name)
2567 return false;
2569 #ifdef CONFIG_KALLSYMS_ALL
2570 if (src->st_shndx == pcpundx)
2571 return true;
2572 #endif
2574 sec = sechdrs + src->st_shndx;
2575 if (!(sec->sh_flags & SHF_ALLOC)
2576 #ifndef CONFIG_KALLSYMS_ALL
2577 || !(sec->sh_flags & SHF_EXECINSTR)
2578 #endif
2579 || (sec->sh_entsize & INIT_OFFSET_MASK))
2580 return false;
2582 return true;
2586 * We only allocate and copy the strings needed by the parts of symtab
2587 * we keep. This is simple, but has the effect of making multiple
2588 * copies of duplicates. We could be more sophisticated, see
2589 * linux-kernel thread starting with
2590 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2592 static void layout_symtab(struct module *mod, struct load_info *info)
2594 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2595 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2596 const Elf_Sym *src;
2597 unsigned int i, nsrc, ndst, strtab_size = 0;
2599 /* Put symbol section at end of init part of module. */
2600 symsect->sh_flags |= SHF_ALLOC;
2601 symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect,
2602 info->index.sym) | INIT_OFFSET_MASK;
2603 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2605 src = (void *)info->hdr + symsect->sh_offset;
2606 nsrc = symsect->sh_size / sizeof(*src);
2608 /* Compute total space required for the core symbols' strtab. */
2609 for (ndst = i = 0; i < nsrc; i++) {
2610 if (i == 0 || is_livepatch_module(mod) ||
2611 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2612 info->index.pcpu)) {
2613 strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2614 ndst++;
2618 /* Append room for core symbols at end of core part. */
2619 info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
2620 info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
2621 mod->core_layout.size += strtab_size;
2622 mod->core_layout.size = debug_align(mod->core_layout.size);
2624 /* Put string table section at end of init part of module. */
2625 strsect->sh_flags |= SHF_ALLOC;
2626 strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
2627 info->index.str) | INIT_OFFSET_MASK;
2628 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2630 /* We'll tack temporary mod_kallsyms on the end. */
2631 mod->init_layout.size = ALIGN(mod->init_layout.size,
2632 __alignof__(struct mod_kallsyms));
2633 info->mod_kallsyms_init_off = mod->init_layout.size;
2634 mod->init_layout.size += sizeof(struct mod_kallsyms);
2635 mod->init_layout.size = debug_align(mod->init_layout.size);
2639 * We use the full symtab and strtab which layout_symtab arranged to
2640 * be appended to the init section. Later we switch to the cut-down
2641 * core-only ones.
2643 static void add_kallsyms(struct module *mod, const struct load_info *info)
2645 unsigned int i, ndst;
2646 const Elf_Sym *src;
2647 Elf_Sym *dst;
2648 char *s;
2649 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2651 /* Set up to point into init section. */
2652 mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
2654 mod->kallsyms->symtab = (void *)symsec->sh_addr;
2655 mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2656 /* Make sure we get permanent strtab: don't use info->strtab. */
2657 mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2659 /* Set types up while we still have access to sections. */
2660 for (i = 0; i < mod->kallsyms->num_symtab; i++)
2661 mod->kallsyms->symtab[i].st_info
2662 = elf_type(&mod->kallsyms->symtab[i], info);
2664 /* Now populate the cut down core kallsyms for after init. */
2665 mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
2666 mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
2667 src = mod->kallsyms->symtab;
2668 for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2669 if (i == 0 || is_livepatch_module(mod) ||
2670 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2671 info->index.pcpu)) {
2672 dst[ndst] = src[i];
2673 dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
2674 s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
2675 KSYM_NAME_LEN) + 1;
2678 mod->core_kallsyms.num_symtab = ndst;
2680 #else
2681 static inline void layout_symtab(struct module *mod, struct load_info *info)
2685 static void add_kallsyms(struct module *mod, const struct load_info *info)
2688 #endif /* CONFIG_KALLSYMS */
2690 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2692 if (!debug)
2693 return;
2694 #ifdef CONFIG_DYNAMIC_DEBUG
2695 if (ddebug_add_module(debug, num, debug->modname))
2696 pr_err("dynamic debug error adding module: %s\n",
2697 debug->modname);
2698 #endif
2701 static void dynamic_debug_remove(struct _ddebug *debug)
2703 if (debug)
2704 ddebug_remove_module(debug->modname);
2707 void * __weak module_alloc(unsigned long size)
2709 return vmalloc_exec(size);
2712 #ifdef CONFIG_DEBUG_KMEMLEAK
2713 static void kmemleak_load_module(const struct module *mod,
2714 const struct load_info *info)
2716 unsigned int i;
2718 /* only scan the sections containing data */
2719 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2721 for (i = 1; i < info->hdr->e_shnum; i++) {
2722 /* Scan all writable sections that are not executable */
2723 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2724 !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2725 (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2726 continue;
2728 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2729 info->sechdrs[i].sh_size, GFP_KERNEL);
2732 #else
2733 static inline void kmemleak_load_module(const struct module *mod,
2734 const struct load_info *info)
2737 #endif
2739 #ifdef CONFIG_MODULE_SIG
2740 static int module_sig_check(struct load_info *info, int flags)
2742 int err = -ENOKEY;
2743 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2744 const void *mod = info->hdr;
2747 * Require flags == 0, as a module with version information
2748 * removed is no longer the module that was signed
2750 if (flags == 0 &&
2751 info->len > markerlen &&
2752 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2753 /* We truncate the module to discard the signature */
2754 info->len -= markerlen;
2755 err = mod_verify_sig(mod, &info->len);
2758 if (!err) {
2759 info->sig_ok = true;
2760 return 0;
2763 /* Not having a signature is only an error if we're strict. */
2764 if (err == -ENOKEY && !sig_enforce)
2765 err = 0;
2767 return err;
2769 #else /* !CONFIG_MODULE_SIG */
2770 static int module_sig_check(struct load_info *info, int flags)
2772 return 0;
2774 #endif /* !CONFIG_MODULE_SIG */
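/*
 * A sketch of the file layout module_sig_check() expects: the build
 * appends the signature after the ELF image and terminates the file
 * with MODULE_SIG_STRING, so only the tail needs to be examined:
 *
 *	[ ELF module ][ signature blob ][ "~Module signature appended~\n" ]
 *
 * After stripping the marker, mod_verify_sig() trims the signature blob
 * as well, leaving info->len describing just the ELF image.
 */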
2776 /* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2777 static int elf_header_check(struct load_info *info)
2779 if (info->len < sizeof(*(info->hdr)))
2780 return -ENOEXEC;
2782 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2783 || info->hdr->e_type != ET_REL
2784 || !elf_check_arch(info->hdr)
2785 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2786 return -ENOEXEC;
2788 if (info->hdr->e_shoff >= info->len
2789 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2790 info->len - info->hdr->e_shoff))
2791 return -ENOEXEC;
2793 return 0;
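/*
 * For a well-formed .ko, "readelf -h" typically reports something like
 * (illustrative output):
 *
 *	Type:                    REL (Relocatable file)
 *	Size of section headers: 64 (bytes)
 *
 * which is exactly what the ET_REL and e_shentsize checks above insist
 * on, in addition to the ELF magic, architecture and bounds checks.
 */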
2796 #define COPY_CHUNK_SIZE (16*PAGE_SIZE)
2798 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
2800 do {
2801 unsigned long n = min(len, COPY_CHUNK_SIZE);
2803 if (copy_from_user(dst, usrc, n) != 0)
2804 return -EFAULT;
2805 cond_resched();
2806 dst += n;
2807 usrc += n;
2808 len -= n;
2809 } while (len);
2810 return 0;
2813 #ifdef CONFIG_LIVEPATCH
2814 static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2816 if (get_modinfo(info, "livepatch")) {
2817 mod->klp = true;
2818 add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
2821 return 0;
2823 #else /* !CONFIG_LIVEPATCH */
2824 static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2826 if (get_modinfo(info, "livepatch")) {
2827 pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
2828 mod->name);
2829 return -ENOEXEC;
2832 return 0;
2834 #endif /* CONFIG_LIVEPATCH */
2836 static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
2838 if (retpoline_module_ok(get_modinfo(info, "retpoline")))
2839 return;
2841 pr_warn("%s: loading module not compiled with retpoline compiler.\n",
2842 mod->name);
2845 /* Sets info->hdr and info->len. */
2846 static int copy_module_from_user(const void __user *umod, unsigned long len,
2847 struct load_info *info)
2849 int err;
2851 info->len = len;
2852 if (info->len < sizeof(*(info->hdr)))
2853 return -ENOEXEC;
2855 err = security_kernel_read_file(NULL, READING_MODULE);
2856 if (err)
2857 return err;
2859 /* Suck in entire file: we'll want most of it. */
2860 info->hdr = __vmalloc(info->len,
2861 GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
2862 if (!info->hdr)
2863 return -ENOMEM;
2865 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
2866 vfree(info->hdr);
2867 return -EFAULT;
2870 return 0;
2873 static void free_copy(struct load_info *info)
2875 vfree(info->hdr);
2878 static int rewrite_section_headers(struct load_info *info, int flags)
2880 unsigned int i;
2882 /* This should always be true, but let's be sure. */
2883 info->sechdrs[0].sh_addr = 0;
2885 for (i = 1; i < info->hdr->e_shnum; i++) {
2886 Elf_Shdr *shdr = &info->sechdrs[i];
2887 if (shdr->sh_type != SHT_NOBITS
2888 && info->len < shdr->sh_offset + shdr->sh_size) {
2889 pr_err("Module len %lu truncated\n", info->len);
2890 return -ENOEXEC;
2893 /* Set each section's sh_addr to its address in the
2894 temporary image. */
2895 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2897 #ifndef CONFIG_MODULE_UNLOAD
2898 /* Don't load .exit sections */
2899 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2900 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2901 #endif
2904 /* Track but don't keep modinfo and version sections. */
2905 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2906 info->index.vers = 0; /* Pretend no __versions section! */
2907 else
2908 info->index.vers = find_sec(info, "__versions");
2909 info->index.info = find_sec(info, ".modinfo");
2910 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2911 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2912 return 0;
2916 * Set up our basic convenience variables (pointers to section headers,
2917 * search for module section index etc), and do some basic section
2918 * verification.
2920 * Return the temporary module pointer (we'll replace it with the final
2921 * one when we move the module sections around).
2923 static struct module *setup_load_info(struct load_info *info, int flags)
2925 unsigned int i;
2926 int err;
2927 struct module *mod;
2929 /* Set up the convenience variables */
2930 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2931 info->secstrings = (void *)info->hdr
2932 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2934 err = rewrite_section_headers(info, flags);
2935 if (err)
2936 return ERR_PTR(err);
2938 /* Find internal symbols and strings. */
2939 for (i = 1; i < info->hdr->e_shnum; i++) {
2940 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2941 info->index.sym = i;
2942 info->index.str = info->sechdrs[i].sh_link;
2943 info->strtab = (char *)info->hdr
2944 + info->sechdrs[info->index.str].sh_offset;
2945 break;
2949 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2950 if (!info->index.mod) {
2951 pr_warn("No module found in object\n");
2952 return ERR_PTR(-ENOEXEC);
2954 /* This is temporary: point mod into copy of data. */
2955 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2957 if (info->index.sym == 0) {
2958 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
2959 return ERR_PTR(-ENOEXEC);
2962 info->index.pcpu = find_pcpusec(info);
2964 /* Check module struct version now, before we try to use module. */
2965 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2966 return ERR_PTR(-ENOEXEC);
2968 return mod;
2971 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2973 const char *modmagic = get_modinfo(info, "vermagic");
2974 int err;
2976 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2977 modmagic = NULL;
2979 /* This is allowed: modprobe --force will invalidate it. */
2980 if (!modmagic) {
2981 err = try_to_force_load(mod, "bad vermagic");
2982 if (err)
2983 return err;
2984 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2985 pr_err("%s: version magic '%s' should be '%s'\n",
2986 mod->name, modmagic, vermagic);
2987 return -ENOEXEC;
2990 if (!get_modinfo(info, "intree")) {
2991 if (!test_taint(TAINT_OOT_MODULE))
2992 pr_warn("%s: loading out-of-tree module taints kernel.\n",
2993 mod->name);
2994 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2997 check_modinfo_retpoline(mod, info);
2999 if (get_modinfo(info, "staging")) {
3000 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
3001 pr_warn("%s: module is from the staging directory, the quality "
3002 "is unknown, you have been warned.\n", mod->name);
3005 err = check_modinfo_livepatch(mod, info);
3006 if (err)
3007 return err;
3009 /* Set up license info based on the info section */
3010 set_license(mod, get_modinfo(info, "license"));
3012 return 0;
3015 static int find_module_sections(struct module *mod, struct load_info *info)
3017 mod->kp = section_objs(info, "__param",
3018 sizeof(*mod->kp), &mod->num_kp);
3019 mod->syms = section_objs(info, "__ksymtab",
3020 sizeof(*mod->syms), &mod->num_syms);
3021 mod->crcs = section_addr(info, "__kcrctab");
3022 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
3023 sizeof(*mod->gpl_syms),
3024 &mod->num_gpl_syms);
3025 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
3026 mod->gpl_future_syms = section_objs(info,
3027 "__ksymtab_gpl_future",
3028 sizeof(*mod->gpl_future_syms),
3029 &mod->num_gpl_future_syms);
3030 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
3032 #ifdef CONFIG_UNUSED_SYMBOLS
3033 mod->unused_syms = section_objs(info, "__ksymtab_unused",
3034 sizeof(*mod->unused_syms),
3035 &mod->num_unused_syms);
3036 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
3037 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
3038 sizeof(*mod->unused_gpl_syms),
3039 &mod->num_unused_gpl_syms);
3040 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
3041 #endif
3042 #ifdef CONFIG_CONSTRUCTORS
3043 mod->ctors = section_objs(info, ".ctors",
3044 sizeof(*mod->ctors), &mod->num_ctors);
3045 if (!mod->ctors)
3046 mod->ctors = section_objs(info, ".init_array",
3047 sizeof(*mod->ctors), &mod->num_ctors);
3048 else if (find_sec(info, ".init_array")) {
3050 * This shouldn't happen with same compiler and binutils
3051 * building all parts of the module.
3053 pr_warn("%s: has both .ctors and .init_array.\n",
3054 mod->name);
3055 return -EINVAL;
3057 #endif
3059 #ifdef CONFIG_TRACEPOINTS
3060 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
3061 sizeof(*mod->tracepoints_ptrs),
3062 &mod->num_tracepoints);
3063 #endif
3064 #ifdef HAVE_JUMP_LABEL
3065 mod->jump_entries = section_objs(info, "__jump_table",
3066 sizeof(*mod->jump_entries),
3067 &mod->num_jump_entries);
3068 #endif
3069 #ifdef CONFIG_EVENT_TRACING
3070 mod->trace_events = section_objs(info, "_ftrace_events",
3071 sizeof(*mod->trace_events),
3072 &mod->num_trace_events);
3073 mod->trace_enums = section_objs(info, "_ftrace_enum_map",
3074 sizeof(*mod->trace_enums),
3075 &mod->num_trace_enums);
3076 #endif
3077 #ifdef CONFIG_TRACING
3078 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
3079 sizeof(*mod->trace_bprintk_fmt_start),
3080 &mod->num_trace_bprintk_fmt);
3081 #endif
3082 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
3083 /* sechdrs[0].sh_size is always zero */
3084 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
3085 sizeof(*mod->ftrace_callsites),
3086 &mod->num_ftrace_callsites);
3087 #endif
3089 mod->extable = section_objs(info, "__ex_table",
3090 sizeof(*mod->extable), &mod->num_exentries);
3092 if (section_addr(info, "__obsparm"))
3093 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
3095 info->debug = section_objs(info, "__verbose",
3096 sizeof(*info->debug), &info->num_debug);
3098 return 0;
3101 static int move_module(struct module *mod, struct load_info *info)
3103 int i;
3104 void *ptr;
3106 /* Do the allocs. */
3107 ptr = module_alloc(mod->core_layout.size);
3109 * The pointer to this block is stored in the module structure
3110 * which is inside the block. Just mark it as not being a
3111 * leak.
3113 kmemleak_not_leak(ptr);
3114 if (!ptr)
3115 return -ENOMEM;
3117 memset(ptr, 0, mod->core_layout.size);
3118 mod->core_layout.base = ptr;
3120 if (mod->init_layout.size) {
3121 ptr = module_alloc(mod->init_layout.size);
3123 * The pointer to this block is stored in the module structure
3124 * which is inside the block. This block doesn't need to be
3125 * scanned as it contains data and code that will be freed
3126 * after the module is initialized.
3128 kmemleak_ignore(ptr);
3129 if (!ptr) {
3130 module_memfree(mod->core_layout.base);
3131 return -ENOMEM;
3133 memset(ptr, 0, mod->init_layout.size);
3134 mod->init_layout.base = ptr;
3135 } else
3136 mod->init_layout.base = NULL;
3138 /* Transfer each section which specifies SHF_ALLOC */
3139 pr_debug("final section addresses:\n");
3140 for (i = 0; i < info->hdr->e_shnum; i++) {
3141 void *dest;
3142 Elf_Shdr *shdr = &info->sechdrs[i];
3144 if (!(shdr->sh_flags & SHF_ALLOC))
3145 continue;
3147 if (shdr->sh_entsize & INIT_OFFSET_MASK)
3148 dest = mod->init_layout.base
3149 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
3150 else
3151 dest = mod->core_layout.base + shdr->sh_entsize;
3153 if (shdr->sh_type != SHT_NOBITS)
3154 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
3155 /* Update sh_addr to point to copy in image. */
3156 shdr->sh_addr = (unsigned long)dest;
3157 pr_debug("\t0x%lx %s\n",
3158 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
3161 return 0;
3164 static int check_module_license_and_versions(struct module *mod)
3166 int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
3169 * ndiswrapper is under GPL by itself, but loads proprietary modules.
3170 * Don't use add_taint_module(), as it would prevent ndiswrapper from
3171 * using GPL-only symbols it needs.
3173 if (strcmp(mod->name, "ndiswrapper") == 0)
3174 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
3176 /* driverloader was caught wrongly pretending to be under GPL */
3177 if (strcmp(mod->name, "driverloader") == 0)
3178 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3179 LOCKDEP_NOW_UNRELIABLE);
3181 /* lve claims to be GPL but upstream won't provide source */
3182 if (strcmp(mod->name, "lve") == 0)
3183 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3184 LOCKDEP_NOW_UNRELIABLE);
3186 if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
3187 pr_warn("%s: module license taints kernel.\n", mod->name);
3189 #ifdef CONFIG_MODVERSIONS
3190 if ((mod->num_syms && !mod->crcs)
3191 || (mod->num_gpl_syms && !mod->gpl_crcs)
3192 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
3193 #ifdef CONFIG_UNUSED_SYMBOLS
3194 || (mod->num_unused_syms && !mod->unused_crcs)
3195 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
3196 #endif
3198 return try_to_force_load(mod,
3199 "no versions for exported symbols");
3201 #endif
3202 return 0;
3205 static void flush_module_icache(const struct module *mod)
3207 mm_segment_t old_fs;
3209 /* flush the icache in correct context */
3210 old_fs = get_fs();
3211 set_fs(KERNEL_DS);
3214 * Flush the instruction cache, since we've played with text.
3215 * Do it before processing module parameters, so the module
3216 * can provide parameter accessor functions of its own.
3218 if (mod->init_layout.base)
3219 flush_icache_range((unsigned long)mod->init_layout.base,
3220 (unsigned long)mod->init_layout.base
3221 + mod->init_layout.size);
3222 flush_icache_range((unsigned long)mod->core_layout.base,
3223 (unsigned long)mod->core_layout.base + mod->core_layout.size);
3225 set_fs(old_fs);
3228 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
3229 Elf_Shdr *sechdrs,
3230 char *secstrings,
3231 struct module *mod)
3233 return 0;
3236 /* module_blacklist is a comma-separated list of module names */
3237 static char *module_blacklist;
3238 static bool blacklisted(char *module_name)
3240 const char *p;
3241 size_t len;
3243 if (!module_blacklist)
3244 return false;
3246 for (p = module_blacklist; *p; p += len) {
3247 len = strcspn(p, ",");
3248 if (strlen(module_name) == len && !memcmp(module_name, p, len))
3249 return true;
3250 if (p[len] == ',')
3251 len++;
3253 return false;
3255 core_param(module_blacklist, module_blacklist, charp, 0400);
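/*
 * module_blacklist comes from the kernel command line, e.g. with
 * illustrative module names:
 *
 *	module_blacklist=nouveau,foo_driver
 *
 * blacklisted() does a length-checked comparison against each comma
 * separated entry, so "foo" does not accidentally match "foo_driver".
 */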
3257 static struct module *layout_and_allocate(struct load_info *info, int flags)
3259 /* Module within temporary copy. */
3260 struct module *mod;
3261 unsigned int ndx;
3262 int err;
3264 mod = setup_load_info(info, flags);
3265 if (IS_ERR(mod))
3266 return mod;
3268 if (blacklisted(mod->name))
3269 return ERR_PTR(-EPERM);
3271 err = check_modinfo(mod, info, flags);
3272 if (err)
3273 return ERR_PTR(err);
3275 /* Allow arches to frob section contents and sizes. */
3276 err = module_frob_arch_sections(info->hdr, info->sechdrs,
3277 info->secstrings, mod);
3278 if (err < 0)
3279 return ERR_PTR(err);
3281 /* We will do a special allocation for per-cpu sections later. */
3282 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
3285 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
3286 * layout_sections() can put it in the right place.
3287 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
3289 ndx = find_sec(info, ".data..ro_after_init");
3290 if (ndx)
3291 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3293 /* Determine total sizes, and put offsets in sh_entsize. For now
3294 this is done generically; there don't appear to be any
3295 special cases for the architectures. */
3296 layout_sections(mod, info);
3297 layout_symtab(mod, info);
3299 /* Allocate and move to the final place */
3300 err = move_module(mod, info);
3301 if (err)
3302 return ERR_PTR(err);
3304 /* Module has been copied to its final place now: return it. */
3305 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
3306 kmemleak_load_module(mod, info);
3307 return mod;
3310 /* mod is no longer valid after this! */
3311 static void module_deallocate(struct module *mod, struct load_info *info)
3313 percpu_modfree(mod);
3314 module_arch_freeing_init(mod);
3315 module_memfree(mod->init_layout.base);
3316 module_memfree(mod->core_layout.base);
3319 int __weak module_finalize(const Elf_Ehdr *hdr,
3320 const Elf_Shdr *sechdrs,
3321 struct module *me)
3323 return 0;
3326 static int post_relocation(struct module *mod, const struct load_info *info)
3328 /* Sort exception table now relocations are done. */
3329 sort_extable(mod->extable, mod->extable + mod->num_exentries);
3331 /* Copy relocated percpu area over. */
3332 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
3333 info->sechdrs[info->index.pcpu].sh_size);
3335 /* Setup kallsyms-specific fields. */
3336 add_kallsyms(mod, info);
3338 /* Arch-specific module finalizing. */
3339 return module_finalize(info->hdr, info->sechdrs, mod);
3342 /* Is this module of this name done loading? No locks held. */
3343 static bool finished_loading(const char *name)
3345 struct module *mod;
3346 bool ret;
3349 * The module_mutex should not be a heavily contended lock;
3350 * if we get the occasional sleep here, we'll go an extra iteration
3351 * in the wait_event_interruptible(), which is harmless.
3353 sched_annotate_sleep();
3354 mutex_lock(&module_mutex);
3355 mod = find_module_all(name, strlen(name), true);
3356 ret = !mod || mod->state == MODULE_STATE_LIVE;
3357 mutex_unlock(&module_mutex);
3359 return ret;
3362 /* Call module constructors. */
3363 static void do_mod_ctors(struct module *mod)
3365 #ifdef CONFIG_CONSTRUCTORS
3366 unsigned long i;
3368 for (i = 0; i < mod->num_ctors; i++)
3369 mod->ctors[i]();
3370 #endif
3373 /* For freeing module_init on success, in case kallsyms traversing */
3374 struct mod_initfree {
3375 struct rcu_head rcu;
3376 void *module_init;
3379 static void do_free_init(struct rcu_head *head)
3381 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
3382 module_memfree(m->module_init);
3383 kfree(m);
3387 * This is where the real work happens.
3389 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
3390 * helper command 'lx-symbols'.
3392 static noinline int do_init_module(struct module *mod)
3394 int ret = 0;
3395 struct mod_initfree *freeinit;
3397 freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
3398 if (!freeinit) {
3399 ret = -ENOMEM;
3400 goto fail;
3402 freeinit->module_init = mod->init_layout.base;
3405 * We want to find out whether @mod uses async during init. Clear
3406 * PF_USED_ASYNC. async_schedule*() will set it.
3408 current->flags &= ~PF_USED_ASYNC;
3410 do_mod_ctors(mod);
3411 /* Start the module */
3412 if (mod->init != NULL)
3413 ret = do_one_initcall(mod->init);
3414 if (ret < 0) {
3415 goto fail_free_freeinit;
3417 if (ret > 0) {
3418 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3419 "follow 0/-E convention\n"
3420 "%s: loading module anyway...\n",
3421 __func__, mod->name, ret, __func__);
3422 dump_stack();
3425 /* Now it's a first class citizen! */
3426 mod->state = MODULE_STATE_LIVE;
3427 blocking_notifier_call_chain(&module_notify_list,
3428 MODULE_STATE_LIVE, mod);
3431 * We need to finish all async code before the module init sequence
3432 * is done. This has potential to deadlock. For example, a newly
3433 * detected block device can trigger request_module() of the
3434 * default iosched from async probing task. Once userland helper
3435 * reaches here, async_synchronize_full() will wait on the async
3436 * task waiting on request_module() and deadlock.
3438 * This deadlock is avoided by performing async_synchronize_full()
3439 * iff module init queued any async jobs. This isn't a full
3440 * solution as it will deadlock the same if module loading from
3441 * async jobs nests more than once; however, due to the various
3442 * constraints, this hack seems to be the best option for now.
3443 * Please refer to the following thread for details.
3445 * http://thread.gmane.org/gmane.linux.kernel/1420814
3447 if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
3448 async_synchronize_full();
3450 mutex_lock(&module_mutex);
3451 /* Drop initial reference. */
3452 module_put(mod);
3453 trim_init_extable(mod);
3454 #ifdef CONFIG_KALLSYMS
3455 /* Switch to core kallsyms now init is done: kallsyms may be walking! */
3456 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
3457 #endif
3458 module_enable_ro(mod, true);
3459 mod_tree_remove_init(mod);
3460 disable_ro_nx(&mod->init_layout);
3461 module_arch_freeing_init(mod);
3462 mod->init_layout.base = NULL;
3463 mod->init_layout.size = 0;
3464 mod->init_layout.ro_size = 0;
3465 mod->init_layout.ro_after_init_size = 0;
3466 mod->init_layout.text_size = 0;
3468 * We want to free module_init, but be aware that kallsyms may be
3469 * walking this with preempt disabled. In all the failure paths, we
3470 * call synchronize_sched(), but we don't want to slow down the success
3471 * path, so use actual RCU here.
3473 call_rcu_sched(&freeinit->rcu, do_free_init);
3474 mutex_unlock(&module_mutex);
3475 wake_up_all(&module_wq);
3477 return 0;
3479 fail_free_freeinit:
3480 kfree(freeinit);
3481 fail:
3482 /* Try to protect us from buggy refcounters. */
3483 mod->state = MODULE_STATE_GOING;
3484 synchronize_sched();
3485 module_put(mod);
3486 blocking_notifier_call_chain(&module_notify_list,
3487 MODULE_STATE_GOING, mod);
3488 klp_module_going(mod);
3489 ftrace_release_mod(mod);
3490 free_module(mod);
3491 wake_up_all(&module_wq);
3492 return ret;
3495 static int may_init_module(void)
3497 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3498 return -EPERM;
3500 return 0;
3504 * We try to place it in the list now to make sure it's unique before
3505 * we dedicate too many resources - in particular, to avoid temporary
3506 * percpu memory exhaustion.
3508 static int add_unformed_module(struct module *mod)
3510 int err;
3511 struct module *old;
3513 mod->state = MODULE_STATE_UNFORMED;
3515 again:
3516 mutex_lock(&module_mutex);
3517 old = find_module_all(mod->name, strlen(mod->name), true);
3518 if (old != NULL) {
3519 if (old->state != MODULE_STATE_LIVE) {
3520 /* Wait in case it fails to load. */
3521 mutex_unlock(&module_mutex);
3522 err = wait_event_interruptible(module_wq,
3523 finished_loading(mod->name));
3524 if (err)
3525 goto out_unlocked;
3526 goto again;
3528 err = -EEXIST;
3529 goto out;
3531 mod_update_bounds(mod);
3532 list_add_rcu(&mod->list, &modules);
3533 mod_tree_insert(mod);
3534 err = 0;
3536 out:
3537 mutex_unlock(&module_mutex);
3538 out_unlocked:
3539 return err;
3542 static int complete_formation(struct module *mod, struct load_info *info)
3544 int err;
3546 mutex_lock(&module_mutex);
3548 /* Find duplicate symbols (must be called under lock). */
3549 err = verify_export_symbols(mod);
3550 if (err < 0)
3551 goto out;
3553 /* This relies on module_mutex for list integrity. */
3554 module_bug_finalize(info->hdr, info->sechdrs, mod);
3556 module_enable_ro(mod, false);
3557 module_enable_nx(mod);
3559 /* Mark state as coming so strong_try_module_get() ignores us,
3560 * but kallsyms etc. can see us. */
3561 mod->state = MODULE_STATE_COMING;
3562 mutex_unlock(&module_mutex);
3564 return 0;
3566 out:
3567 mutex_unlock(&module_mutex);
3568 return err;
3571 static int prepare_coming_module(struct module *mod)
3573 int err;
3575 ftrace_module_enable(mod);
3576 err = klp_module_coming(mod);
3577 if (err)
3578 return err;
3580 blocking_notifier_call_chain(&module_notify_list,
3581 MODULE_STATE_COMING, mod);
3582 return 0;
3585 static int unknown_module_param_cb(char *param, char *val, const char *modname,
3586 void *arg)
3588 struct module *mod = arg;
3589 int ret;
3591 if (strcmp(param, "async_probe") == 0) {
3592 mod->async_probe_requested = true;
3593 return 0;
3596 /* Check for magic 'dyndbg' arg */
3597 ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3598 if (ret != 0)
3599 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3600 return 0;
3603 /* Allocate and load the module: note that size of section 0 is always
3604 zero, and we rely on this for optional sections. */
3605 static int load_module(struct load_info *info, const char __user *uargs,
3606 int flags)
3608 struct module *mod;
3609 long err;
3610 char *after_dashes;
3612 err = module_sig_check(info, flags);
3613 if (err)
3614 goto free_copy;
3616 err = elf_header_check(info);
3617 if (err)
3618 goto free_copy;
3620 /* Figure out module layout, and allocate all the memory. */
3621 mod = layout_and_allocate(info, flags);
3622 if (IS_ERR(mod)) {
3623 err = PTR_ERR(mod);
3624 goto free_copy;
3627 /* Reserve our place in the list. */
3628 err = add_unformed_module(mod);
3629 if (err)
3630 goto free_module;
3632 #ifdef CONFIG_MODULE_SIG
3633 mod->sig_ok = info->sig_ok;
3634 if (!mod->sig_ok) {
3635 pr_notice_once("%s: module verification failed: signature "
3636 "and/or required key missing - tainting "
3637 "kernel\n", mod->name);
3638 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
3640 #endif
3642 /* To avoid stressing percpu allocator, do this once we're unique. */
3643 err = percpu_modalloc(mod, info);
3644 if (err)
3645 goto unlink_mod;
3647 /* Now module is in final location, initialize linked lists, etc. */
3648 err = module_unload_init(mod);
3649 if (err)
3650 goto unlink_mod;
3652 init_param_lock(mod);
3654 /* Now we've got everything in the final locations, we can
3655 * find optional sections. */
3656 err = find_module_sections(mod, info);
3657 if (err)
3658 goto free_unload;
3660 err = check_module_license_and_versions(mod);
3661 if (err)
3662 goto free_unload;
3664 /* Set up MODINFO_ATTR fields */
3665 setup_modinfo(mod, info);
3667 /* Fix up syms, so that st_value is a pointer to location. */
3668 err = simplify_symbols(mod, info);
3669 if (err < 0)
3670 goto free_modinfo;
3672 err = apply_relocations(mod, info);
3673 if (err < 0)
3674 goto free_modinfo;
3676 err = post_relocation(mod, info);
3677 if (err < 0)
3678 goto free_modinfo;
3680 flush_module_icache(mod);
3682 /* Now copy in args */
3683 mod->args = strndup_user(uargs, ~0UL >> 1);
3684 if (IS_ERR(mod->args)) {
3685 err = PTR_ERR(mod->args);
3686 goto free_arch_cleanup;
3689 dynamic_debug_setup(info->debug, info->num_debug);
3691 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3692 ftrace_module_init(mod);
3694 /* Finally it's fully formed, ready to start executing. */
3695 err = complete_formation(mod, info);
3696 if (err)
3697 goto ddebug_cleanup;
3699 err = prepare_coming_module(mod);
3700 if (err)
3701 goto bug_cleanup;
3703 /* Module is ready to execute: parsing args may do that. */
3704 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3705 -32768, 32767, mod,
3706 unknown_module_param_cb);
3707 if (IS_ERR(after_dashes)) {
3708 err = PTR_ERR(after_dashes);
3709 goto coming_cleanup;
3710 } else if (after_dashes) {
3711 pr_warn("%s: parameters '%s' after `--' ignored\n",
3712 mod->name, after_dashes);
3715 /* Link in to sysfs. */
3716 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3717 if (err < 0)
3718 goto coming_cleanup;
3720 if (is_livepatch_module(mod)) {
3721 err = copy_module_elf(mod, info);
3722 if (err < 0)
3723 goto sysfs_cleanup;
3726 /* Get rid of temporary copy. */
3727 free_copy(info);
3729 /* Done! */
3730 trace_module_load(mod);
3732 return do_init_module(mod);
3734 sysfs_cleanup:
3735 mod_sysfs_teardown(mod);
3736 coming_cleanup:
3737 blocking_notifier_call_chain(&module_notify_list,
3738 MODULE_STATE_GOING, mod);
3739 klp_module_going(mod);
3740 bug_cleanup:
3741 /* module_bug_cleanup needs module_mutex protection */
3742 mutex_lock(&module_mutex);
3743 module_bug_cleanup(mod);
3744 mutex_unlock(&module_mutex);
3746 /* we can't deallocate the module until we clear memory protection */
3747 module_disable_ro(mod);
3748 module_disable_nx(mod);
3750 ddebug_cleanup:
3751 dynamic_debug_remove(info->debug);
3752 synchronize_sched();
3753 kfree(mod->args);
3754 free_arch_cleanup:
3755 module_arch_cleanup(mod);
3756 free_modinfo:
3757 free_modinfo(mod);
3758 free_unload:
3759 module_unload_free(mod);
3760 unlink_mod:
3761 mutex_lock(&module_mutex);
3762 /* Unlink carefully: kallsyms could be walking the list. */
3763 list_del_rcu(&mod->list);
3764 mod_tree_remove(mod);
3765 wake_up_all(&module_wq);
3766 /* Wait for RCU-sched synchronization before releasing mod->list. */
3767 synchronize_sched();
3768 mutex_unlock(&module_mutex);
3769 free_module:
3771 * Ftrace needs to clean up what it initialized.
3772 * This does nothing if ftrace_module_init() wasn't called,
3773 * but it must be called outside of module_mutex.
3775 ftrace_release_mod(mod);
3776 /* Free lock-classes; relies on the preceding sync_rcu() */
3777 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
3779 module_deallocate(mod, info);
3780 free_copy:
3781 free_copy(info);
3782 return err;
3785 SYSCALL_DEFINE3(init_module, void __user *, umod,
3786 unsigned long, len, const char __user *, uargs)
3788 int err;
3789 struct load_info info = { };
3791 err = may_init_module();
3792 if (err)
3793 return err;
3795 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3796 umod, len, uargs);
3798 err = copy_module_from_user(umod, len, &info);
3799 if (err)
3800 return err;
3802 return load_module(&info, uargs, 0);
3805 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3807 struct load_info info = { };
3808 loff_t size;
3809 void *hdr;
3810 int err;
3812 err = may_init_module();
3813 if (err)
3814 return err;
3816 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3818 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3819 |MODULE_INIT_IGNORE_VERMAGIC))
3820 return -EINVAL;
3822 err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
3823 READING_MODULE);
3824 if (err)
3825 return err;
3826 info.hdr = hdr;
3827 info.len = size;
3829 return load_module(&info, uargs, flags);
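/*
 * A minimal userspace sketch (not part of this file; error handling
 * omitted, path illustrative) of driving the syscall above:
 *
 *	int fd = open("/path/to/example.ko", O_RDONLY | O_CLOEXEC);
 *	if (syscall(__NR_finit_module, fd, "", 0) != 0)
 *		perror("finit_module");
 *	close(fd);
 *
 * This is roughly what modprobe/insmod do on kernels with finit_module
 * support; the empty string is the module parameter list.
 */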
3832 static inline int within(unsigned long addr, void *start, unsigned long size)
3834 return ((void *)addr >= start && (void *)addr < start + size);
3837 #ifdef CONFIG_KALLSYMS
3839 * This ignores the intensely annoying "mapping symbols" found
3840 * in ARM ELF files: $a, $t and $d.
3842 static inline int is_arm_mapping_symbol(const char *str)
3844 if (str[0] == '.' && str[1] == 'L')
3845 return true;
3846 return str[0] == '$' && strchr("axtd", str[1])
3847 && (str[2] == '\0' || str[2] == '.');
3850 static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
3852 return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
3855 static const char *get_ksymbol(struct module *mod,
3856 unsigned long addr,
3857 unsigned long *size,
3858 unsigned long *offset)
3860 unsigned int i, best = 0;
3861 unsigned long nextval;
3862 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
3864 /* At worst, the next value is at the end of the module */
3865 if (within_module_init(addr, mod))
3866 nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
3867 else
3868 nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
3870 /* Scan for closest preceding symbol, and next symbol. (ELF
3871 starts real symbols at 1). */
3872 for (i = 1; i < kallsyms->num_symtab; i++) {
3873 if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
3874 continue;
3876 /* We ignore unnamed symbols: they're uninformative
3877 * and inserted at a whim. */
3878 if (*symname(kallsyms, i) == '\0'
3879 || is_arm_mapping_symbol(symname(kallsyms, i)))
3880 continue;
3882 if (kallsyms->symtab[i].st_value <= addr
3883 && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
3884 best = i;
3885 if (kallsyms->symtab[i].st_value > addr
3886 && kallsyms->symtab[i].st_value < nextval)
3887 nextval = kallsyms->symtab[i].st_value;
3890 if (!best)
3891 return NULL;
3893 if (size)
3894 *size = nextval - kallsyms->symtab[best].st_value;
3895 if (offset)
3896 *offset = addr - kallsyms->symtab[best].st_value;
3897 return symname(kallsyms, best);
3900 /* For kallsyms to ask for address resolution. NULL means not found. We are
3901 * careful not to lock (to avoid deadlock on oopses); we simply disable preemption. */
3902 const char *module_address_lookup(unsigned long addr,
3903 unsigned long *size,
3904 unsigned long *offset,
3905 char **modname,
3906 char *namebuf)
3908 const char *ret = NULL;
3909 struct module *mod;
3911 preempt_disable();
3912 mod = __module_address(addr);
3913 if (mod) {
3914 if (modname)
3915 *modname = mod->name;
3916 ret = get_ksymbol(mod, addr, size, offset);
3918 /* Make a copy in here where it's safe */
3919 if (ret) {
3920 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3921 ret = namebuf;
3923 preempt_enable();
3925 return ret;
3928 int lookup_module_symbol_name(unsigned long addr, char *symname)
3930 struct module *mod;
3932 preempt_disable();
3933 list_for_each_entry_rcu(mod, &modules, list) {
3934 if (mod->state == MODULE_STATE_UNFORMED)
3935 continue;
3936 if (within_module(addr, mod)) {
3937 const char *sym;
3939 sym = get_ksymbol(mod, addr, NULL, NULL);
3940 if (!sym)
3941 goto out;
3942 strlcpy(symname, sym, KSYM_NAME_LEN);
3943 preempt_enable();
3944 return 0;
3947 out:
3948 preempt_enable();
3949 return -ERANGE;
3952 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3953 unsigned long *offset, char *modname, char *name)
3955 struct module *mod;
3957 preempt_disable();
3958 list_for_each_entry_rcu(mod, &modules, list) {
3959 if (mod->state == MODULE_STATE_UNFORMED)
3960 continue;
3961 if (within_module(addr, mod)) {
3962 const char *sym;
3964 sym = get_ksymbol(mod, addr, size, offset);
3965 if (!sym)
3966 goto out;
3967 if (modname)
3968 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3969 if (name)
3970 strlcpy(name, sym, KSYM_NAME_LEN);
3971 preempt_enable();
3972 return 0;
3975 out:
3976 preempt_enable();
3977 return -ERANGE;
3980 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3981 char *name, char *module_name, int *exported)
3983 struct module *mod;
3985 preempt_disable();
3986 list_for_each_entry_rcu(mod, &modules, list) {
3987 struct mod_kallsyms *kallsyms;
3989 if (mod->state == MODULE_STATE_UNFORMED)
3990 continue;
3991 kallsyms = rcu_dereference_sched(mod->kallsyms);
3992 if (symnum < kallsyms->num_symtab) {
3993 *value = kallsyms->symtab[symnum].st_value;
3994 *type = kallsyms->symtab[symnum].st_info;
3995 strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
3996 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3997 *exported = is_exported(name, *value, mod);
3998 preempt_enable();
3999 return 0;
4001 symnum -= kallsyms->num_symtab;
4003 preempt_enable();
4004 return -ERANGE;
4007 static unsigned long mod_find_symname(struct module *mod, const char *name)
4009 unsigned int i;
4010 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
4012 for (i = 0; i < kallsyms->num_symtab; i++)
4013 if (strcmp(name, symname(kallsyms, i)) == 0 &&
4014 kallsyms->symtab[i].st_shndx != SHN_UNDEF)
4015 return kallsyms->symtab[i].st_value;
4016 return 0;
4019 /* Look for this name: can be of form module:name. */
4020 unsigned long module_kallsyms_lookup_name(const char *name)
4022 struct module *mod;
4023 char *colon;
4024 unsigned long ret = 0;
4026 /* Don't lock: we're in enough trouble already. */
4027 preempt_disable();
4028 if ((colon = strchr(name, ':')) != NULL) {
4029 if ((mod = find_module_all(name, colon - name, false)) != NULL)
4030 ret = mod_find_symname(mod, colon+1);
4031 } else {
4032 list_for_each_entry_rcu(mod, &modules, list) {
4033 if (mod->state == MODULE_STATE_UNFORMED)
4034 continue;
4035 if ((ret = mod_find_symname(mod, name)) != 0)
4036 break;
4039 preempt_enable();
4040 return ret;
4043 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
4044 struct module *, unsigned long),
4045 void *data)
4047 struct module *mod;
4048 unsigned int i;
4049 int ret;
4051 module_assert_mutex();
4053 list_for_each_entry(mod, &modules, list) {
4054 /* We hold module_mutex: no need for rcu_dereference_sched */
4055 struct mod_kallsyms *kallsyms = mod->kallsyms;
4057 if (mod->state == MODULE_STATE_UNFORMED)
4058 continue;
4059 for (i = 0; i < kallsyms->num_symtab; i++) {
4061 if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
4062 continue;
4064 ret = fn(data, symname(kallsyms, i),
4065 mod, kallsyms->symtab[i].st_value);
4066 if (ret != 0)
4067 return ret;
4070 return 0;
4072 #endif /* CONFIG_KALLSYMS */
4074 static char *module_flags(struct module *mod, char *buf)
4076 int bx = 0;
4078 BUG_ON(mod->state == MODULE_STATE_UNFORMED);
4079 if (mod->taints ||
4080 mod->state == MODULE_STATE_GOING ||
4081 mod->state == MODULE_STATE_COMING) {
4082 buf[bx++] = '(';
4083 bx += module_flags_taint(mod, buf + bx);
4084 /* Show a - for module-is-being-unloaded */
4085 if (mod->state == MODULE_STATE_GOING)
4086 buf[bx++] = '-';
4087 /* Show a + for module-is-being-loaded */
4088 if (mod->state == MODULE_STATE_COMING)
4089 buf[bx++] = '+';
4090 buf[bx++] = ')';
4092 buf[bx] = '\0';
4094 return buf;
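/*
 * Example flag strings as they appear in /proc/modules and oops taint
 * output (illustrative): "(OE)" for an out-of-tree unsigned module, or
 * "(PO-)" for a proprietary out-of-tree module that is being unloaded.
 */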
4097 #ifdef CONFIG_PROC_FS
4098 /* Called by the /proc file system to return a list of modules. */
4099 static void *m_start(struct seq_file *m, loff_t *pos)
4101 mutex_lock(&module_mutex);
4102 return seq_list_start(&modules, *pos);
4105 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
4107 return seq_list_next(p, &modules, pos);
4110 static void m_stop(struct seq_file *m, void *p)
4112 mutex_unlock(&module_mutex);

static int m_show(struct seq_file *m, void *p)
{
	struct module *mod = list_entry(p, struct module, list);
	char buf[8];

	/* We always ignore unformed modules. */
	if (mod->state == MODULE_STATE_UNFORMED)
		return 0;

	seq_printf(m, "%s %u",
		   mod->name, mod->init_layout.size + mod->core_layout.size);
	print_unload_info(m, mod);

	/* Informative for users. */
	seq_printf(m, " %s",
		   mod->state == MODULE_STATE_GOING ? "Unloading" :
		   mod->state == MODULE_STATE_COMING ? "Loading" :
		   "Live");
	/* Used by oprofile and other similar tools. */
	seq_printf(m, " 0x%pK", mod->core_layout.base);

	/* Taints info */
	if (mod->taints)
		seq_printf(m, " %s", module_flags(mod, buf));

	seq_puts(m, "\n");
	return 0;
}

/* Format: modulename size refcount deps state address [taints]

   Where refcount is a number or -, deps is a comma-separated list
   of depends or -, and state is Live, Loading or Unloading.
*/
static const struct seq_operations modules_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};

static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}

static const struct file_operations proc_modules_operations = {
	.open		= modules_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proc_modules_init(void)
{
	proc_create("modules", 0, NULL, &proc_modules_operations);
	return 0;
}
module_init(proc_modules_init);
#endif

/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (mod->num_exentries == 0)
			continue;

		e = search_extable(mod->extable,
				   mod->extable + mod->num_exentries - 1,
				   addr);
		if (e)
			break;
	}
	preempt_enable();

	/* Now, if we found one, we are running inside it now, hence
	   we cannot unload the module, hence no refcnt needed. */
	return e;
}

/**
 * is_module_address - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/*
 * __module_address - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	module_assert_mutex_or_preempt();

	mod = mod_find(addr);
	if (mod) {
		BUG_ON(!within_module(addr, mod));
		if (mod->state == MODULE_STATE_UNFORMED)
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_address);
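
/*
 * Illustrative use (not a call site in this file): a caller that cannot
 * hold module_mutex keeps preemption disabled across the lookup and any
 * use of the returned module, as is_module_address() does above:
 *
 *	preempt_disable();
 *	mod = __module_address(addr);
 *	if (mod)
 *		pr_debug("0x%lx belongs to %s\n", addr, mod->name);
 *	preempt_enable();
 */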

/**
 * is_module_text_address - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module. See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_text_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/*
 * __module_text_address - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod = __module_address(addr);
	if (mod) {
		/* Make sure it's within the text section. */
		if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
		    && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_text_address);

/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
	struct module *mod;
	char buf[8];

	printk(KERN_DEFAULT "Modules linked in:");
	/* Most callers should already have preempt disabled, but make sure */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
	}
	preempt_enable();
	if (last_unloaded_module[0])
		pr_cont(" [last unloaded: %s]", last_unloaded_module);
	pr_cont("\n");
}

#ifdef CONFIG_MODVERSIONS
/* Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module. */
void module_layout(struct module *mod,
		   struct modversion_info *ver,
		   struct kernel_param *kp,
		   struct kernel_symbol *ks,
		   struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
#endif