/* kernel/module.c */
1 /*
2 Copyright (C) 2002 Richard Henderson
3 Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
19 #include <linux/export.h>
20 #include <linux/moduleloader.h>
21 #include <linux/ftrace_event.h>
22 #include <linux/init.h>
23 #include <linux/kallsyms.h>
24 #include <linux/file.h>
25 #include <linux/fs.h>
26 #include <linux/sysfs.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/vmalloc.h>
30 #include <linux/elf.h>
31 #include <linux/proc_fs.h>
32 #include <linux/security.h>
33 #include <linux/seq_file.h>
34 #include <linux/syscalls.h>
35 #include <linux/fcntl.h>
36 #include <linux/rcupdate.h>
37 #include <linux/capability.h>
38 #include <linux/cpu.h>
39 #include <linux/moduleparam.h>
40 #include <linux/errno.h>
41 #include <linux/err.h>
42 #include <linux/vermagic.h>
43 #include <linux/notifier.h>
44 #include <linux/sched.h>
45 #include <linux/stop_machine.h>
46 #include <linux/device.h>
47 #include <linux/string.h>
48 #include <linux/mutex.h>
49 #include <linux/rculist.h>
50 #include <asm/uaccess.h>
51 #include <asm/cacheflush.h>
52 #include <asm/mmu_context.h>
53 #include <linux/license.h>
54 #include <asm/sections.h>
55 #include <linux/tracepoint.h>
56 #include <linux/ftrace.h>
57 #include <linux/async.h>
58 #include <linux/percpu.h>
59 #include <linux/kmemleak.h>
60 #include <linux/jump_label.h>
61 #include <linux/pfn.h>
62 #include <linux/bsearch.h>
63 #include <linux/fips.h>
64 #include <uapi/linux/module.h>
65 #include "module-internal.h"
67 #define CREATE_TRACE_POINTS
68 #include <trace/events/module.h>
70 #ifndef ARCH_SHF_SMALL
71 #define ARCH_SHF_SMALL 0
72 #endif
75 * Modules' sections will be aligned on page boundaries
76 * to ensure complete separation of code and data, but
77 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
79 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
80 # define debug_align(X) ALIGN(X, PAGE_SIZE)
81 #else
82 # define debug_align(X) (X)
83 #endif
86 * Given BASE and SIZE this macro calculates the number of pages the
87 * memory region occupies
89 #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \
90 (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \
91 PFN_DOWN((unsigned long)BASE) + 1) \
92 : (0UL))
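/*
 * Worked example (illustrative, assuming 4 KiB pages, not part of the
 * original source): a 0x100-byte region starting at BASE = 0x100f80 ends
 * at 0x10107f, so PFN_DOWN(end) - PFN_DOWN(BASE) + 1 = 0x101 - 0x100 + 1
 * = 2 pages, since the region straddles a page boundary.  A SIZE of 0
 * takes the ternary's else branch and yields 0 pages.
 */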
94 /* If this is set, the section belongs in the init part of the module */
95 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
98 * Mutex protects:
99 * 1) List of modules (also safely readable with preempt_disable),
100 * 2) module_use links,
101 * 3) module_addr_min/module_addr_max.
102 * (delete uses stop_machine/add uses RCU list operations). */
103 DEFINE_MUTEX(module_mutex);
104 EXPORT_SYMBOL_GPL(module_mutex);
105 static LIST_HEAD(modules);
106 #ifdef CONFIG_KGDB_KDB
107 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
108 #endif /* CONFIG_KGDB_KDB */
110 #ifdef CONFIG_MODULE_SIG
111 #ifdef CONFIG_MODULE_SIG_FORCE
112 static bool sig_enforce = true;
113 #else
114 static bool sig_enforce = false;
116 static int param_set_bool_enable_only(const char *val,
117 const struct kernel_param *kp)
119 int err;
120 bool test;
121 struct kernel_param dummy_kp = *kp;
123 dummy_kp.arg = &test;
125 err = param_set_bool(val, &dummy_kp);
126 if (err)
127 return err;
129 /* Don't let them unset it once it's set! */
130 if (!test && sig_enforce)
131 return -EROFS;
133 if (test)
134 sig_enforce = true;
135 return 0;
138 static const struct kernel_param_ops param_ops_bool_enable_only = {
139 .flags = KERNEL_PARAM_FL_NOARG,
140 .set = param_set_bool_enable_only,
141 .get = param_get_bool,
143 #define param_check_bool_enable_only param_check_bool
145 module_param(sig_enforce, bool_enable_only, 0644);
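/*
 * Usage sketch (illustrative, not part of the original source): with
 * CONFIG_MODULE_SIG=y but without CONFIG_MODULE_SIG_FORCE, enforcement can
 * be switched on but never back off:
 *
 *   # echo 1 > /sys/module/module/parameters/sig_enforce      succeeds
 *   # echo 0 > /sys/module/module/parameters/sig_enforce      fails, -EROFS
 *
 * or at boot via "module.sig_enforce=1" on the kernel command line.
 */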
146 #endif /* !CONFIG_MODULE_SIG_FORCE */
147 #endif /* CONFIG_MODULE_SIG */
149 /* Block module loading/unloading? */
150 int modules_disabled = 0;
151 core_param(nomodule, modules_disabled, bint, 0);
153 /* Waiting for a module to finish initializing? */
154 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
156 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
158 /* Bounds of module allocation, for speeding __module_address.
159 * Protected by module_mutex. */
160 static unsigned long module_addr_min = -1UL, module_addr_max = 0;
162 int register_module_notifier(struct notifier_block * nb)
164 return blocking_notifier_chain_register(&module_notify_list, nb);
166 EXPORT_SYMBOL(register_module_notifier);
168 int unregister_module_notifier(struct notifier_block * nb)
170 return blocking_notifier_chain_unregister(&module_notify_list, nb);
172 EXPORT_SYMBOL(unregister_module_notifier);
174 struct load_info {
175 Elf_Ehdr *hdr;
176 unsigned long len;
177 Elf_Shdr *sechdrs;
178 char *secstrings, *strtab;
179 unsigned long symoffs, stroffs;
180 struct _ddebug *debug;
181 unsigned int num_debug;
182 bool sig_ok;
183 struct {
184 unsigned int sym, str, mod, vers, info, pcpu;
185 } index;
188 /* We require a truly strong try_module_get(): 0 means failure due to
189 ongoing or failed initialization etc. */
190 static inline int strong_try_module_get(struct module *mod)
192 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
193 if (mod && mod->state == MODULE_STATE_COMING)
194 return -EBUSY;
195 if (try_module_get(mod))
196 return 0;
197 else
198 return -ENOENT;
201 static inline void add_taint_module(struct module *mod, unsigned flag,
202 enum lockdep_ok lockdep_ok)
204 add_taint(flag, lockdep_ok);
205 mod->taints |= (1U << flag);
209 * A thread that wants to hold a reference to a module only while it
210 * is running can call this to safely exit. nfsd and lockd use this.
212 void __module_put_and_exit(struct module *mod, long code)
214 module_put(mod);
215 do_exit(code);
217 EXPORT_SYMBOL(__module_put_and_exit);
219 /* Find a module section: 0 means not found. */
220 static unsigned int find_sec(const struct load_info *info, const char *name)
222 unsigned int i;
224 for (i = 1; i < info->hdr->e_shnum; i++) {
225 Elf_Shdr *shdr = &info->sechdrs[i];
226 /* Alloc bit cleared means "ignore it." */
227 if ((shdr->sh_flags & SHF_ALLOC)
228 && strcmp(info->secstrings + shdr->sh_name, name) == 0)
229 return i;
231 return 0;
234 /* Find a module section, or NULL. */
235 static void *section_addr(const struct load_info *info, const char *name)
237 /* Section 0 has sh_addr 0. */
238 return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
241 /* Find a module section, or NULL. Fill in number of "objects" in section. */
242 static void *section_objs(const struct load_info *info,
243 const char *name,
244 size_t object_size,
245 unsigned int *num)
247 unsigned int sec = find_sec(info, name);
249 /* Section 0 has sh_addr 0 and sh_size 0. */
250 *num = info->sechdrs[sec].sh_size / object_size;
251 return (void *)info->sechdrs[sec].sh_addr;
254 /* Provided by the linker */
255 extern const struct kernel_symbol __start___ksymtab[];
256 extern const struct kernel_symbol __stop___ksymtab[];
257 extern const struct kernel_symbol __start___ksymtab_gpl[];
258 extern const struct kernel_symbol __stop___ksymtab_gpl[];
259 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
260 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
261 extern const unsigned long __start___kcrctab[];
262 extern const unsigned long __start___kcrctab_gpl[];
263 extern const unsigned long __start___kcrctab_gpl_future[];
264 #ifdef CONFIG_UNUSED_SYMBOLS
265 extern const struct kernel_symbol __start___ksymtab_unused[];
266 extern const struct kernel_symbol __stop___ksymtab_unused[];
267 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
268 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
269 extern const unsigned long __start___kcrctab_unused[];
270 extern const unsigned long __start___kcrctab_unused_gpl[];
271 #endif
273 #ifndef CONFIG_MODVERSIONS
274 #define symversion(base, idx) NULL
275 #else
276 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
277 #endif
279 static bool each_symbol_in_section(const struct symsearch *arr,
280 unsigned int arrsize,
281 struct module *owner,
282 bool (*fn)(const struct symsearch *syms,
283 struct module *owner,
284 void *data),
285 void *data)
287 unsigned int j;
289 for (j = 0; j < arrsize; j++) {
290 if (fn(&arr[j], owner, data))
291 return true;
294 return false;
297 /* Returns true as soon as fn returns true, otherwise false. */
298 bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
299 struct module *owner,
300 void *data),
301 void *data)
303 struct module *mod;
304 static const struct symsearch arr[] = {
305 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
306 NOT_GPL_ONLY, false },
307 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
308 __start___kcrctab_gpl,
309 GPL_ONLY, false },
310 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
311 __start___kcrctab_gpl_future,
312 WILL_BE_GPL_ONLY, false },
313 #ifdef CONFIG_UNUSED_SYMBOLS
314 { __start___ksymtab_unused, __stop___ksymtab_unused,
315 __start___kcrctab_unused,
316 NOT_GPL_ONLY, true },
317 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
318 __start___kcrctab_unused_gpl,
319 GPL_ONLY, true },
320 #endif
323 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
324 return true;
326 list_for_each_entry_rcu(mod, &modules, list) {
327 struct symsearch arr[] = {
328 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
329 NOT_GPL_ONLY, false },
330 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
331 mod->gpl_crcs,
332 GPL_ONLY, false },
333 { mod->gpl_future_syms,
334 mod->gpl_future_syms + mod->num_gpl_future_syms,
335 mod->gpl_future_crcs,
336 WILL_BE_GPL_ONLY, false },
337 #ifdef CONFIG_UNUSED_SYMBOLS
338 { mod->unused_syms,
339 mod->unused_syms + mod->num_unused_syms,
340 mod->unused_crcs,
341 NOT_GPL_ONLY, true },
342 { mod->unused_gpl_syms,
343 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
344 mod->unused_gpl_crcs,
345 GPL_ONLY, true },
346 #endif
349 if (mod->state == MODULE_STATE_UNFORMED)
350 continue;
352 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
353 return true;
355 return false;
357 EXPORT_SYMBOL_GPL(each_symbol_section);
359 struct find_symbol_arg {
360 /* Input */
361 const char *name;
362 bool gplok;
363 bool warn;
365 /* Output */
366 struct module *owner;
367 const unsigned long *crc;
368 const struct kernel_symbol *sym;
371 static bool check_symbol(const struct symsearch *syms,
372 struct module *owner,
373 unsigned int symnum, void *data)
375 struct find_symbol_arg *fsa = data;
377 if (!fsa->gplok) {
378 if (syms->licence == GPL_ONLY)
379 return false;
380 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
381 pr_warn("Symbol %s is being used by a non-GPL module, "
382 "which will not be allowed in the future\n",
383 fsa->name);
387 #ifdef CONFIG_UNUSED_SYMBOLS
388 if (syms->unused && fsa->warn) {
389 pr_warn("Symbol %s is marked as UNUSED, however this module is "
390 "using it.\n", fsa->name);
391 pr_warn("This symbol will go away in the future.\n");
392 pr_warn("Please evalute if this is the right api to use and if "
393 "it really is, submit a report the linux kernel "
394 "mailinglist together with submitting your code for "
395 "inclusion.\n");
397 #endif
399 fsa->owner = owner;
400 fsa->crc = symversion(syms->crcs, symnum);
401 fsa->sym = &syms->start[symnum];
402 return true;
405 static int cmp_name(const void *va, const void *vb)
407 const char *a;
408 const struct kernel_symbol *b;
409 a = va; b = vb;
410 return strcmp(a, b->name);
413 static bool find_symbol_in_section(const struct symsearch *syms,
414 struct module *owner,
415 void *data)
417 struct find_symbol_arg *fsa = data;
418 struct kernel_symbol *sym;
420 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
421 sizeof(struct kernel_symbol), cmp_name);
423 if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
424 return true;
426 return false;
429 /* Find a symbol and return it, along with (optional) crc and
430 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
431 const struct kernel_symbol *find_symbol(const char *name,
432 struct module **owner,
433 const unsigned long **crc,
434 bool gplok,
435 bool warn)
437 struct find_symbol_arg fsa;
439 fsa.name = name;
440 fsa.gplok = gplok;
441 fsa.warn = warn;
443 if (each_symbol_section(find_symbol_in_section, &fsa)) {
444 if (owner)
445 *owner = fsa.owner;
446 if (crc)
447 *crc = fsa.crc;
448 return fsa.sym;
451 pr_debug("Failed to find symbol %s\n", name);
452 return NULL;
454 EXPORT_SYMBOL_GPL(find_symbol);
456 /* Search for module by name: must hold module_mutex. */
457 static struct module *find_module_all(const char *name, size_t len,
458 bool even_unformed)
460 struct module *mod;
462 list_for_each_entry(mod, &modules, list) {
463 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
464 continue;
465 if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
466 return mod;
468 return NULL;
471 struct module *find_module(const char *name)
473 return find_module_all(name, strlen(name), false);
475 EXPORT_SYMBOL_GPL(find_module);
477 #ifdef CONFIG_SMP
479 static inline void __percpu *mod_percpu(struct module *mod)
481 return mod->percpu;
484 static int percpu_modalloc(struct module *mod, struct load_info *info)
486 Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
487 unsigned long align = pcpusec->sh_addralign;
489 if (!pcpusec->sh_size)
490 return 0;
492 if (align > PAGE_SIZE) {
493 pr_warn("%s: per-cpu alignment %li > %li\n",
494 mod->name, align, PAGE_SIZE);
495 align = PAGE_SIZE;
498 mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
499 if (!mod->percpu) {
500 pr_warn("%s: Could not allocate %lu bytes percpu data\n",
501 mod->name, (unsigned long)pcpusec->sh_size);
502 return -ENOMEM;
504 mod->percpu_size = pcpusec->sh_size;
505 return 0;
508 static void percpu_modfree(struct module *mod)
510 free_percpu(mod->percpu);
513 static unsigned int find_pcpusec(struct load_info *info)
515 return find_sec(info, ".data..percpu");
518 static void percpu_modcopy(struct module *mod,
519 const void *from, unsigned long size)
521 int cpu;
523 for_each_possible_cpu(cpu)
524 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
528 * is_module_percpu_address - test whether address is from module static percpu
529 * @addr: address to test
531 * Test whether @addr belongs to module static percpu area.
533 * RETURNS:
534 * %true if @addr is from module static percpu area
536 bool is_module_percpu_address(unsigned long addr)
538 struct module *mod;
539 unsigned int cpu;
541 preempt_disable();
543 list_for_each_entry_rcu(mod, &modules, list) {
544 if (mod->state == MODULE_STATE_UNFORMED)
545 continue;
546 if (!mod->percpu_size)
547 continue;
548 for_each_possible_cpu(cpu) {
549 void *start = per_cpu_ptr(mod->percpu, cpu);
551 if ((void *)addr >= start &&
552 (void *)addr < start + mod->percpu_size) {
553 preempt_enable();
554 return true;
559 preempt_enable();
560 return false;
563 #else /* ... !CONFIG_SMP */
565 static inline void __percpu *mod_percpu(struct module *mod)
567 return NULL;
569 static int percpu_modalloc(struct module *mod, struct load_info *info)
571 /* UP modules shouldn't have this section: ENOMEM isn't quite right */
572 if (info->sechdrs[info->index.pcpu].sh_size != 0)
573 return -ENOMEM;
574 return 0;
576 static inline void percpu_modfree(struct module *mod)
579 static unsigned int find_pcpusec(struct load_info *info)
581 return 0;
583 static inline void percpu_modcopy(struct module *mod,
584 const void *from, unsigned long size)
586 /* pcpusec should be 0, and size of that section should be 0. */
587 BUG_ON(size != 0);
589 bool is_module_percpu_address(unsigned long addr)
591 return false;
594 #endif /* CONFIG_SMP */
596 #define MODINFO_ATTR(field) \
597 static void setup_modinfo_##field(struct module *mod, const char *s) \
599 mod->field = kstrdup(s, GFP_KERNEL); \
601 static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
602 struct module_kobject *mk, char *buffer) \
604 return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
606 static int modinfo_##field##_exists(struct module *mod) \
608 return mod->field != NULL; \
610 static void free_modinfo_##field(struct module *mod) \
612 kfree(mod->field); \
613 mod->field = NULL; \
615 static struct module_attribute modinfo_##field = { \
616 .attr = { .name = __stringify(field), .mode = 0444 }, \
617 .show = show_modinfo_##field, \
618 .setup = setup_modinfo_##field, \
619 .test = modinfo_##field##_exists, \
620 .free = free_modinfo_##field, \
623 MODINFO_ATTR(version);
624 MODINFO_ATTR(srcversion);
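/*
 * Illustrative expansion (added for clarity, not in the original source):
 * MODINFO_ATTR(version) generates setup_modinfo_version(),
 * show_modinfo_version(), modinfo_version_exists() and
 * free_modinfo_version(), wrapped up in the module_attribute
 * modinfo_version.  The net effect is the read-only file
 * /sys/module/<name>/version, populated from the module's .modinfo
 * "version=" tag at load time.
 */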
626 static char last_unloaded_module[MODULE_NAME_LEN+1];
628 #ifdef CONFIG_MODULE_UNLOAD
630 EXPORT_TRACEPOINT_SYMBOL(module_get);
632 /* Init the unload section of the module. */
633 static int module_unload_init(struct module *mod)
635 mod->refptr = alloc_percpu(struct module_ref);
636 if (!mod->refptr)
637 return -ENOMEM;
639 INIT_LIST_HEAD(&mod->source_list);
640 INIT_LIST_HEAD(&mod->target_list);
642 /* Hold reference count during initialization. */
643 __this_cpu_write(mod->refptr->incs, 1);
645 return 0;
648 /* Does a already use b? */
649 static int already_uses(struct module *a, struct module *b)
651 struct module_use *use;
653 list_for_each_entry(use, &b->source_list, source_list) {
654 if (use->source == a) {
655 pr_debug("%s uses %s!\n", a->name, b->name);
656 return 1;
659 pr_debug("%s does not use %s!\n", a->name, b->name);
660 return 0;
664 * Module a uses b
665 * - we add 'a' as a "source", 'b' as a "target" of module use
666 * - the module_use is added to the list of 'b' sources (so
667 * 'b' can walk the list to see who sourced them), and of 'a'
668 * targets (so 'a' can see what modules it targets).
670 static int add_module_usage(struct module *a, struct module *b)
672 struct module_use *use;
674 pr_debug("Allocating new usage for %s.\n", a->name);
675 use = kmalloc(sizeof(*use), GFP_ATOMIC);
676 if (!use) {
677 pr_warn("%s: out of memory loading\n", a->name);
678 return -ENOMEM;
681 use->source = a;
682 use->target = b;
683 list_add(&use->source_list, &b->source_list);
684 list_add(&use->target_list, &a->target_list);
685 return 0;
688 /* Module a uses b: caller needs module_mutex() */
689 int ref_module(struct module *a, struct module *b)
691 int err;
693 if (b == NULL || already_uses(a, b))
694 return 0;
696 /* If module isn't available, we fail. */
697 err = strong_try_module_get(b);
698 if (err)
699 return err;
701 err = add_module_usage(a, b);
702 if (err) {
703 module_put(b);
704 return err;
706 return 0;
708 EXPORT_SYMBOL_GPL(ref_module);
710 /* Clear the unload stuff of the module. */
711 static void module_unload_free(struct module *mod)
713 struct module_use *use, *tmp;
715 mutex_lock(&module_mutex);
716 list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
717 struct module *i = use->target;
718 pr_debug("%s unusing %s\n", mod->name, i->name);
719 module_put(i);
720 list_del(&use->source_list);
721 list_del(&use->target_list);
722 kfree(use);
724 mutex_unlock(&module_mutex);
726 free_percpu(mod->refptr);
729 #ifdef CONFIG_MODULE_FORCE_UNLOAD
730 static inline int try_force_unload(unsigned int flags)
732 int ret = (flags & O_TRUNC);
733 if (ret)
734 add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
735 return ret;
737 #else
738 static inline int try_force_unload(unsigned int flags)
740 return 0;
742 #endif /* CONFIG_MODULE_FORCE_UNLOAD */
744 struct stopref
746 struct module *mod;
747 int flags;
748 int *forced;
751 /* Whole machine is stopped with interrupts off when this runs. */
752 static int __try_stop_module(void *_sref)
754 struct stopref *sref = _sref;
756 /* If it's not unused, quit unless we're forcing. */
757 if (module_refcount(sref->mod) != 0) {
758 if (!(*sref->forced = try_force_unload(sref->flags)))
759 return -EWOULDBLOCK;
762 /* Mark it as dying. */
763 sref->mod->state = MODULE_STATE_GOING;
764 return 0;
767 static int try_stop_module(struct module *mod, int flags, int *forced)
769 struct stopref sref = { mod, flags, forced };
771 return stop_machine(__try_stop_module, &sref, NULL);
774 unsigned long module_refcount(struct module *mod)
776 unsigned long incs = 0, decs = 0;
777 int cpu;
779 for_each_possible_cpu(cpu)
780 decs += per_cpu_ptr(mod->refptr, cpu)->decs;
782 * ensure the incs are added up after the decs.
783 * module_put ensures incs are visible before decs with smp_wmb.
785 * This 2-count scheme avoids the situation where the refcount
786 * for CPU0 is read, then CPU0 increments the module refcount,
787 * then CPU1 drops that refcount, then the refcount for CPU1 is
788 * read. We would record a decrement but not its corresponding
789 * increment so we would see a low count (disaster).
791 * Rare situation? But module_refcount can be preempted, and we
792 * might be tallying up 4096+ CPUs. So it is not impossible.
794 smp_rmb();
795 for_each_possible_cpu(cpu)
796 incs += per_cpu_ptr(mod->refptr, cpu)->incs;
797 return incs - decs;
799 EXPORT_SYMBOL(module_refcount);
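/*
 * Worked example of the under-count described above, assuming the sums
 * were taken in the opposite (incs-first) order (illustrative, not from
 * the original source); one reference is held on CPU0 to start with:
 *
 *   reader sums incs:  CPU0.incs == 1, CPU1.incs == 0   (total 1)
 *   CPU0: try_module_get()  ->  CPU0.incs == 2
 *   CPU1: module_put()      ->  CPU1.decs == 1
 *   reader sums decs:                                   (total 1)
 *
 * giving 1 - 1 = 0 even though a reference is still held.  Summing decs
 * first, with the smp_rmb() here pairing against module_put()'s smp_wmb(),
 * can only over-estimate the count, never report a live module as unused.
 */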
801 /* This exists whether we can unload or not */
802 static void free_module(struct module *mod);
804 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
805 unsigned int, flags)
807 struct module *mod;
808 char name[MODULE_NAME_LEN];
809 int ret, forced = 0;
811 if (!capable(CAP_SYS_MODULE) || modules_disabled)
812 return -EPERM;
814 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
815 return -EFAULT;
816 name[MODULE_NAME_LEN-1] = '\0';
818 if (mutex_lock_interruptible(&module_mutex) != 0)
819 return -EINTR;
821 mod = find_module(name);
822 if (!mod) {
823 ret = -ENOENT;
824 goto out;
827 if (!list_empty(&mod->source_list)) {
828 /* Other modules depend on us: get rid of them first. */
829 ret = -EWOULDBLOCK;
830 goto out;
833 /* Doing init or already dying? */
834 if (mod->state != MODULE_STATE_LIVE) {
835 /* FIXME: if (force), slam module count damn the torpedoes */
836 pr_debug("%s already dying\n", mod->name);
837 ret = -EBUSY;
838 goto out;
841 /* If it has an init func, it must have an exit func to unload */
842 if (mod->init && !mod->exit) {
843 forced = try_force_unload(flags);
844 if (!forced) {
845 /* This module can't be removed */
846 ret = -EBUSY;
847 goto out;
851 /* Stop the machine so refcounts can't move and disable module. */
852 ret = try_stop_module(mod, flags, &forced);
853 if (ret != 0)
854 goto out;
856 mutex_unlock(&module_mutex);
857 /* Final destruction now no one is using it. */
858 if (mod->exit != NULL)
859 mod->exit();
860 blocking_notifier_call_chain(&module_notify_list,
861 MODULE_STATE_GOING, mod);
862 async_synchronize_full();
864 /* Store the name of the last unloaded module for diagnostic purposes */
865 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
867 free_module(mod);
868 return 0;
869 out:
870 mutex_unlock(&module_mutex);
871 return ret;
874 static inline void print_unload_info(struct seq_file *m, struct module *mod)
876 struct module_use *use;
877 int printed_something = 0;
879 seq_printf(m, " %lu ", module_refcount(mod));
881 /* Always include a trailing , so userspace can differentiate
882 between this and the old multi-field proc format. */
883 list_for_each_entry(use, &mod->source_list, source_list) {
884 printed_something = 1;
885 seq_printf(m, "%s,", use->source->name);
888 if (mod->init != NULL && mod->exit == NULL) {
889 printed_something = 1;
890 seq_printf(m, "[permanent],");
893 if (!printed_something)
894 seq_printf(m, "-");
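/*
 * Illustrative /proc/modules fragments emitted here (module names made up):
 *
 *   " 2 ehci_hcd,uhci_hcd,"   in use by two dependent modules
 *   " 0 [permanent],"         has an init but no exit, cannot be unloaded
 *   " 0 -"                    unused, no dependents
 */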
897 void __symbol_put(const char *symbol)
899 struct module *owner;
901 preempt_disable();
902 if (!find_symbol(symbol, &owner, NULL, true, false))
903 BUG();
904 module_put(owner);
905 preempt_enable();
907 EXPORT_SYMBOL(__symbol_put);
909 /* Note this assumes addr is a function, which it currently always is. */
910 void symbol_put_addr(void *addr)
912 struct module *modaddr;
913 unsigned long a = (unsigned long)dereference_function_descriptor(addr);
915 if (core_kernel_text(a))
916 return;
918 /* module_text_address is safe here: we're supposed to have reference
919 * to module from symbol_get, so it can't go away. */
920 modaddr = __module_text_address(a);
921 BUG_ON(!modaddr);
922 module_put(modaddr);
924 EXPORT_SYMBOL_GPL(symbol_put_addr);
926 static ssize_t show_refcnt(struct module_attribute *mattr,
927 struct module_kobject *mk, char *buffer)
929 return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
932 static struct module_attribute modinfo_refcnt =
933 __ATTR(refcnt, 0444, show_refcnt, NULL);
935 void __module_get(struct module *module)
937 if (module) {
938 preempt_disable();
939 __this_cpu_inc(module->refptr->incs);
940 trace_module_get(module, _RET_IP_);
941 preempt_enable();
944 EXPORT_SYMBOL(__module_get);
946 bool try_module_get(struct module *module)
948 bool ret = true;
950 if (module) {
951 preempt_disable();
953 if (likely(module_is_live(module))) {
954 __this_cpu_inc(module->refptr->incs);
955 trace_module_get(module, _RET_IP_);
956 } else
957 ret = false;
959 preempt_enable();
961 return ret;
963 EXPORT_SYMBOL(try_module_get);
965 void module_put(struct module *module)
967 if (module) {
968 preempt_disable();
969 smp_wmb(); /* see comment in module_refcount */
970 __this_cpu_inc(module->refptr->decs);
972 trace_module_put(module, _RET_IP_);
973 preempt_enable();
976 EXPORT_SYMBOL(module_put);
978 #else /* !CONFIG_MODULE_UNLOAD */
979 static inline void print_unload_info(struct seq_file *m, struct module *mod)
981 /* We don't know the usage count, or what modules are using. */
982 seq_printf(m, " - -");
985 static inline void module_unload_free(struct module *mod)
989 int ref_module(struct module *a, struct module *b)
991 return strong_try_module_get(b);
993 EXPORT_SYMBOL_GPL(ref_module);
995 static inline int module_unload_init(struct module *mod)
997 return 0;
999 #endif /* CONFIG_MODULE_UNLOAD */
1001 static size_t module_flags_taint(struct module *mod, char *buf)
1003 size_t l = 0;
1005 if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
1006 buf[l++] = 'P';
1007 if (mod->taints & (1 << TAINT_OOT_MODULE))
1008 buf[l++] = 'O';
1009 if (mod->taints & (1 << TAINT_FORCED_MODULE))
1010 buf[l++] = 'F';
1011 if (mod->taints & (1 << TAINT_CRAP))
1012 buf[l++] = 'C';
1014 * TAINT_FORCED_RMMOD: could be added.
1015 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
1016 * apply to modules.
1018 return l;
1021 static ssize_t show_initstate(struct module_attribute *mattr,
1022 struct module_kobject *mk, char *buffer)
1024 const char *state = "unknown";
1026 switch (mk->mod->state) {
1027 case MODULE_STATE_LIVE:
1028 state = "live";
1029 break;
1030 case MODULE_STATE_COMING:
1031 state = "coming";
1032 break;
1033 case MODULE_STATE_GOING:
1034 state = "going";
1035 break;
1036 default:
1037 BUG();
1039 return sprintf(buffer, "%s\n", state);
1042 static struct module_attribute modinfo_initstate =
1043 __ATTR(initstate, 0444, show_initstate, NULL);
1045 static ssize_t store_uevent(struct module_attribute *mattr,
1046 struct module_kobject *mk,
1047 const char *buffer, size_t count)
1049 enum kobject_action action;
1051 if (kobject_action_type(buffer, count, &action) == 0)
1052 kobject_uevent(&mk->kobj, action);
1053 return count;
1056 struct module_attribute module_uevent =
1057 __ATTR(uevent, 0200, NULL, store_uevent);
1059 static ssize_t show_coresize(struct module_attribute *mattr,
1060 struct module_kobject *mk, char *buffer)
1062 return sprintf(buffer, "%u\n", mk->mod->core_size);
1065 static struct module_attribute modinfo_coresize =
1066 __ATTR(coresize, 0444, show_coresize, NULL);
1068 static ssize_t show_initsize(struct module_attribute *mattr,
1069 struct module_kobject *mk, char *buffer)
1071 return sprintf(buffer, "%u\n", mk->mod->init_size);
1074 static struct module_attribute modinfo_initsize =
1075 __ATTR(initsize, 0444, show_initsize, NULL);
1077 static ssize_t show_taint(struct module_attribute *mattr,
1078 struct module_kobject *mk, char *buffer)
1080 size_t l;
1082 l = module_flags_taint(mk->mod, buffer);
1083 buffer[l++] = '\n';
1084 return l;
1087 static struct module_attribute modinfo_taint =
1088 __ATTR(taint, 0444, show_taint, NULL);
1090 static struct module_attribute *modinfo_attrs[] = {
1091 &module_uevent,
1092 &modinfo_version,
1093 &modinfo_srcversion,
1094 &modinfo_initstate,
1095 &modinfo_coresize,
1096 &modinfo_initsize,
1097 &modinfo_taint,
1098 #ifdef CONFIG_MODULE_UNLOAD
1099 &modinfo_refcnt,
1100 #endif
1101 NULL,
1104 static const char vermagic[] = VERMAGIC_STRING;
1106 static int try_to_force_load(struct module *mod, const char *reason)
1108 #ifdef CONFIG_MODULE_FORCE_LOAD
1109 if (!test_taint(TAINT_FORCED_MODULE))
1110 pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
1111 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
1112 return 0;
1113 #else
1114 return -ENOEXEC;
1115 #endif
1118 #ifdef CONFIG_MODVERSIONS
1119 /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
1120 static unsigned long maybe_relocated(unsigned long crc,
1121 const struct module *crc_owner)
1123 #ifdef ARCH_RELOCATES_KCRCTAB
1124 if (crc_owner == NULL)
1125 return crc - (unsigned long)reloc_start;
1126 #endif
1127 return crc;
1130 static int check_version(Elf_Shdr *sechdrs,
1131 unsigned int versindex,
1132 const char *symname,
1133 struct module *mod,
1134 const unsigned long *crc,
1135 const struct module *crc_owner)
1137 unsigned int i, num_versions;
1138 struct modversion_info *versions;
1140 /* Exporting module didn't supply crcs? OK, we're already tainted. */
1141 if (!crc)
1142 return 1;
1144 /* No versions at all? modprobe --force does this. */
1145 if (versindex == 0)
1146 return try_to_force_load(mod, symname) == 0;
1148 versions = (void *) sechdrs[versindex].sh_addr;
1149 num_versions = sechdrs[versindex].sh_size
1150 / sizeof(struct modversion_info);
1152 for (i = 0; i < num_versions; i++) {
1153 if (strcmp(versions[i].name, symname) != 0)
1154 continue;
1156 if (versions[i].crc == maybe_relocated(*crc, crc_owner))
1157 return 1;
1158 pr_debug("Found checksum %lX vs module %lX\n",
1159 maybe_relocated(*crc, crc_owner), versions[i].crc);
1160 goto bad_version;
1163 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
1164 return 0;
1166 bad_version:
1167 printk("%s: disagrees about version of symbol %s\n",
1168 mod->name, symname);
1169 return 0;
1172 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1173 unsigned int versindex,
1174 struct module *mod)
1176 const unsigned long *crc;
1178 /* Since this should be found in kernel (which can't be removed),
1179 * no locking is necessary. */
1180 if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
1181 &crc, true, false))
1182 BUG();
1183 return check_version(sechdrs, versindex,
1184 VMLINUX_SYMBOL_STR(module_layout), mod, crc,
1185 NULL);
1188 /* First part is kernel version, which we ignore if module has crcs. */
1189 static inline int same_magic(const char *amagic, const char *bmagic,
1190 bool has_crcs)
1192 if (has_crcs) {
1193 amagic += strcspn(amagic, " ");
1194 bmagic += strcspn(bmagic, " ");
1196 return strcmp(amagic, bmagic) == 0;
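/*
 * Illustrative example (vermagic strings made up): when the module carries
 * CRCs, one built against "3.13.7 SMP mod_unload " still matches a kernel
 * whose vermagic is "3.13.1 SMP mod_unload ", because strcspn() skips the
 * leading release token on both sides and the remaining " SMP mod_unload "
 * tails compare equal; without CRCs the differing release strings make
 * same_magic() fail and the load is rejected.
 */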
1198 #else
1199 static inline int check_version(Elf_Shdr *sechdrs,
1200 unsigned int versindex,
1201 const char *symname,
1202 struct module *mod,
1203 const unsigned long *crc,
1204 const struct module *crc_owner)
1206 return 1;
1209 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1210 unsigned int versindex,
1211 struct module *mod)
1213 return 1;
1216 static inline int same_magic(const char *amagic, const char *bmagic,
1217 bool has_crcs)
1219 return strcmp(amagic, bmagic) == 0;
1221 #endif /* CONFIG_MODVERSIONS */
1223 /* Resolve a symbol for this module. I.e. if we find one, record usage. */
1224 static const struct kernel_symbol *resolve_symbol(struct module *mod,
1225 const struct load_info *info,
1226 const char *name,
1227 char ownername[])
1229 struct module *owner;
1230 const struct kernel_symbol *sym;
1231 const unsigned long *crc;
1232 int err;
1234 mutex_lock(&module_mutex);
1235 sym = find_symbol(name, &owner, &crc,
1236 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1237 if (!sym)
1238 goto unlock;
1240 if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
1241 owner)) {
1242 sym = ERR_PTR(-EINVAL);
1243 goto getname;
1246 err = ref_module(mod, owner);
1247 if (err) {
1248 sym = ERR_PTR(err);
1249 goto getname;
1252 getname:
1253 /* We must make copy under the lock if we failed to get ref. */
1254 strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1255 unlock:
1256 mutex_unlock(&module_mutex);
1257 return sym;
1260 static const struct kernel_symbol *
1261 resolve_symbol_wait(struct module *mod,
1262 const struct load_info *info,
1263 const char *name)
1265 const struct kernel_symbol *ksym;
1266 char owner[MODULE_NAME_LEN];
1268 if (wait_event_interruptible_timeout(module_wq,
1269 !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1270 || PTR_ERR(ksym) != -EBUSY,
1271 30 * HZ) <= 0) {
1272 pr_warn("%s: gave up waiting for init of module %s.\n",
1273 mod->name, owner);
1275 return ksym;
1279 * /sys/module/foo/sections stuff
1280 * J. Corbet <corbet@lwn.net>
1282 #ifdef CONFIG_SYSFS
1284 #ifdef CONFIG_KALLSYMS
1285 static inline bool sect_empty(const Elf_Shdr *sect)
1287 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1290 struct module_sect_attr
1292 struct module_attribute mattr;
1293 char *name;
1294 unsigned long address;
1297 struct module_sect_attrs
1299 struct attribute_group grp;
1300 unsigned int nsections;
1301 struct module_sect_attr attrs[0];
1304 static ssize_t module_sect_show(struct module_attribute *mattr,
1305 struct module_kobject *mk, char *buf)
1307 struct module_sect_attr *sattr =
1308 container_of(mattr, struct module_sect_attr, mattr);
1309 return sprintf(buf, "0x%pK\n", (void *)sattr->address);
1312 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1314 unsigned int section;
1316 for (section = 0; section < sect_attrs->nsections; section++)
1317 kfree(sect_attrs->attrs[section].name);
1318 kfree(sect_attrs);
1321 static void add_sect_attrs(struct module *mod, const struct load_info *info)
1323 unsigned int nloaded = 0, i, size[2];
1324 struct module_sect_attrs *sect_attrs;
1325 struct module_sect_attr *sattr;
1326 struct attribute **gattr;
1328 /* Count loaded sections and allocate structures */
1329 for (i = 0; i < info->hdr->e_shnum; i++)
1330 if (!sect_empty(&info->sechdrs[i]))
1331 nloaded++;
1332 size[0] = ALIGN(sizeof(*sect_attrs)
1333 + nloaded * sizeof(sect_attrs->attrs[0]),
1334 sizeof(sect_attrs->grp.attrs[0]));
1335 size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
1336 sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1337 if (sect_attrs == NULL)
1338 return;
1340 /* Setup section attributes. */
1341 sect_attrs->grp.name = "sections";
1342 sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
1344 sect_attrs->nsections = 0;
1345 sattr = &sect_attrs->attrs[0];
1346 gattr = &sect_attrs->grp.attrs[0];
1347 for (i = 0; i < info->hdr->e_shnum; i++) {
1348 Elf_Shdr *sec = &info->sechdrs[i];
1349 if (sect_empty(sec))
1350 continue;
1351 sattr->address = sec->sh_addr;
1352 sattr->name = kstrdup(info->secstrings + sec->sh_name,
1353 GFP_KERNEL);
1354 if (sattr->name == NULL)
1355 goto out;
1356 sect_attrs->nsections++;
1357 sysfs_attr_init(&sattr->mattr.attr);
1358 sattr->mattr.show = module_sect_show;
1359 sattr->mattr.store = NULL;
1360 sattr->mattr.attr.name = sattr->name;
1361 sattr->mattr.attr.mode = S_IRUGO;
1362 *(gattr++) = &(sattr++)->mattr.attr;
1364 *gattr = NULL;
1366 if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1367 goto out;
1369 mod->sect_attrs = sect_attrs;
1370 return;
1371 out:
1372 free_sect_attrs(sect_attrs);
1375 static void remove_sect_attrs(struct module *mod)
1377 if (mod->sect_attrs) {
1378 sysfs_remove_group(&mod->mkobj.kobj,
1379 &mod->sect_attrs->grp);
1380 /* We are positive that no one is using any sect attrs
1381 * at this point. Deallocate immediately. */
1382 free_sect_attrs(mod->sect_attrs);
1383 mod->sect_attrs = NULL;
1388 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1391 struct module_notes_attrs {
1392 struct kobject *dir;
1393 unsigned int notes;
1394 struct bin_attribute attrs[0];
1397 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1398 struct bin_attribute *bin_attr,
1399 char *buf, loff_t pos, size_t count)
1402 * The caller checked the pos and count against our size.
1404 memcpy(buf, bin_attr->private + pos, count);
1405 return count;
1408 static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1409 unsigned int i)
1411 if (notes_attrs->dir) {
1412 while (i-- > 0)
1413 sysfs_remove_bin_file(notes_attrs->dir,
1414 &notes_attrs->attrs[i]);
1415 kobject_put(notes_attrs->dir);
1417 kfree(notes_attrs);
1420 static void add_notes_attrs(struct module *mod, const struct load_info *info)
1422 unsigned int notes, loaded, i;
1423 struct module_notes_attrs *notes_attrs;
1424 struct bin_attribute *nattr;
1426 /* failed to create section attributes, so can't create notes */
1427 if (!mod->sect_attrs)
1428 return;
1430 /* Count notes sections and allocate structures. */
1431 notes = 0;
1432 for (i = 0; i < info->hdr->e_shnum; i++)
1433 if (!sect_empty(&info->sechdrs[i]) &&
1434 (info->sechdrs[i].sh_type == SHT_NOTE))
1435 ++notes;
1437 if (notes == 0)
1438 return;
1440 notes_attrs = kzalloc(sizeof(*notes_attrs)
1441 + notes * sizeof(notes_attrs->attrs[0]),
1442 GFP_KERNEL);
1443 if (notes_attrs == NULL)
1444 return;
1446 notes_attrs->notes = notes;
1447 nattr = &notes_attrs->attrs[0];
1448 for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1449 if (sect_empty(&info->sechdrs[i]))
1450 continue;
1451 if (info->sechdrs[i].sh_type == SHT_NOTE) {
1452 sysfs_bin_attr_init(nattr);
1453 nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
1454 nattr->attr.mode = S_IRUGO;
1455 nattr->size = info->sechdrs[i].sh_size;
1456 nattr->private = (void *) info->sechdrs[i].sh_addr;
1457 nattr->read = module_notes_read;
1458 ++nattr;
1460 ++loaded;
1463 notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1464 if (!notes_attrs->dir)
1465 goto out;
1467 for (i = 0; i < notes; ++i)
1468 if (sysfs_create_bin_file(notes_attrs->dir,
1469 &notes_attrs->attrs[i]))
1470 goto out;
1472 mod->notes_attrs = notes_attrs;
1473 return;
1475 out:
1476 free_notes_attrs(notes_attrs, i);
1479 static void remove_notes_attrs(struct module *mod)
1481 if (mod->notes_attrs)
1482 free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1485 #else
1487 static inline void add_sect_attrs(struct module *mod,
1488 const struct load_info *info)
1492 static inline void remove_sect_attrs(struct module *mod)
1496 static inline void add_notes_attrs(struct module *mod,
1497 const struct load_info *info)
1501 static inline void remove_notes_attrs(struct module *mod)
1504 #endif /* CONFIG_KALLSYMS */
1506 static void add_usage_links(struct module *mod)
1508 #ifdef CONFIG_MODULE_UNLOAD
1509 struct module_use *use;
1510 int nowarn;
1512 mutex_lock(&module_mutex);
1513 list_for_each_entry(use, &mod->target_list, target_list) {
1514 nowarn = sysfs_create_link(use->target->holders_dir,
1515 &mod->mkobj.kobj, mod->name);
1517 mutex_unlock(&module_mutex);
1518 #endif
1521 static void del_usage_links(struct module *mod)
1523 #ifdef CONFIG_MODULE_UNLOAD
1524 struct module_use *use;
1526 mutex_lock(&module_mutex);
1527 list_for_each_entry(use, &mod->target_list, target_list)
1528 sysfs_remove_link(use->target->holders_dir, mod->name);
1529 mutex_unlock(&module_mutex);
1530 #endif
1533 static int module_add_modinfo_attrs(struct module *mod)
1535 struct module_attribute *attr;
1536 struct module_attribute *temp_attr;
1537 int error = 0;
1538 int i;
1540 mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1541 (ARRAY_SIZE(modinfo_attrs) + 1)),
1542 GFP_KERNEL);
1543 if (!mod->modinfo_attrs)
1544 return -ENOMEM;
1546 temp_attr = mod->modinfo_attrs;
1547 for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
1548 if (!attr->test ||
1549 (attr->test && attr->test(mod))) {
1550 memcpy(temp_attr, attr, sizeof(*temp_attr));
1551 sysfs_attr_init(&temp_attr->attr);
1552 error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr);
1553 ++temp_attr;
1556 return error;
1559 static void module_remove_modinfo_attrs(struct module *mod)
1561 struct module_attribute *attr;
1562 int i;
1564 for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1565 /* pick a field to test for end of list */
1566 if (!attr->attr.name)
1567 break;
1568 sysfs_remove_file(&mod->mkobj.kobj,&attr->attr);
1569 if (attr->free)
1570 attr->free(mod);
1572 kfree(mod->modinfo_attrs);
1575 static void mod_kobject_put(struct module *mod)
1577 DECLARE_COMPLETION_ONSTACK(c);
1578 mod->mkobj.kobj_completion = &c;
1579 kobject_put(&mod->mkobj.kobj);
1580 wait_for_completion(&c);
1583 static int mod_sysfs_init(struct module *mod)
1585 int err;
1586 struct kobject *kobj;
1588 if (!module_sysfs_initialized) {
1589 pr_err("%s: module sysfs not initialized\n", mod->name);
1590 err = -EINVAL;
1591 goto out;
1594 kobj = kset_find_obj(module_kset, mod->name);
1595 if (kobj) {
1596 pr_err("%s: module is already loaded\n", mod->name);
1597 kobject_put(kobj);
1598 err = -EINVAL;
1599 goto out;
1602 mod->mkobj.mod = mod;
1604 memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1605 mod->mkobj.kobj.kset = module_kset;
1606 err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1607 "%s", mod->name);
1608 if (err)
1609 mod_kobject_put(mod);
1611 /* delay uevent until full sysfs population */
1612 out:
1613 return err;
1616 static int mod_sysfs_setup(struct module *mod,
1617 const struct load_info *info,
1618 struct kernel_param *kparam,
1619 unsigned int num_params)
1621 int err;
1623 err = mod_sysfs_init(mod);
1624 if (err)
1625 goto out;
1627 mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1628 if (!mod->holders_dir) {
1629 err = -ENOMEM;
1630 goto out_unreg;
1633 err = module_param_sysfs_setup(mod, kparam, num_params);
1634 if (err)
1635 goto out_unreg_holders;
1637 err = module_add_modinfo_attrs(mod);
1638 if (err)
1639 goto out_unreg_param;
1641 add_usage_links(mod);
1642 add_sect_attrs(mod, info);
1643 add_notes_attrs(mod, info);
1645 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1646 return 0;
1648 out_unreg_param:
1649 module_param_sysfs_remove(mod);
1650 out_unreg_holders:
1651 kobject_put(mod->holders_dir);
1652 out_unreg:
1653 mod_kobject_put(mod);
1654 out:
1655 return err;
1658 static void mod_sysfs_fini(struct module *mod)
1660 remove_notes_attrs(mod);
1661 remove_sect_attrs(mod);
1662 mod_kobject_put(mod);
1665 #else /* !CONFIG_SYSFS */
1667 static int mod_sysfs_setup(struct module *mod,
1668 const struct load_info *info,
1669 struct kernel_param *kparam,
1670 unsigned int num_params)
1672 return 0;
1675 static void mod_sysfs_fini(struct module *mod)
1679 static void module_remove_modinfo_attrs(struct module *mod)
1683 static void del_usage_links(struct module *mod)
1687 #endif /* CONFIG_SYSFS */
1689 static void mod_sysfs_teardown(struct module *mod)
1691 del_usage_links(mod);
1692 module_remove_modinfo_attrs(mod);
1693 module_param_sysfs_remove(mod);
1694 kobject_put(mod->mkobj.drivers_dir);
1695 kobject_put(mod->holders_dir);
1696 mod_sysfs_fini(mod);
1700 * unlink the module while the whole machine is stopped with interrupts off
1701 * - this defends against kallsyms not taking locks
1703 static int __unlink_module(void *_mod)
1705 struct module *mod = _mod;
1706 list_del(&mod->list);
1707 module_bug_cleanup(mod);
1708 return 0;
1711 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
1713 * LKM RO/NX protection: protect module's text/ro-data
1714 * from modification and any data from execution.
1716 void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
1718 unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
1719 unsigned long end_pfn = PFN_DOWN((unsigned long)end);
1721 if (end_pfn > begin_pfn)
1722 set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
1725 static void set_section_ro_nx(void *base,
1726 unsigned long text_size,
1727 unsigned long ro_size,
1728 unsigned long total_size)
1730 /* begin and end PFNs of the current subsection */
1731 unsigned long begin_pfn;
1732 unsigned long end_pfn;
1735 * Set RO for module text and RO-data:
1736 * - Always protect first page.
1737 * - Do not protect last partial page.
1739 if (ro_size > 0)
1740 set_page_attributes(base, base + ro_size, set_memory_ro);
1743 * Set NX permissions for module data:
1744 * - Do not protect first partial page.
1745 * - Always protect last page.
1747 if (total_size > text_size) {
1748 begin_pfn = PFN_UP((unsigned long)base + text_size);
1749 end_pfn = PFN_UP((unsigned long)base + total_size);
1750 if (end_pfn > begin_pfn)
1751 set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
1755 static void unset_module_core_ro_nx(struct module *mod)
1757 set_page_attributes(mod->module_core + mod->core_text_size,
1758 mod->module_core + mod->core_size,
1759 set_memory_x);
1760 set_page_attributes(mod->module_core,
1761 mod->module_core + mod->core_ro_size,
1762 set_memory_rw);
1765 static void unset_module_init_ro_nx(struct module *mod)
1767 set_page_attributes(mod->module_init + mod->init_text_size,
1768 mod->module_init + mod->init_size,
1769 set_memory_x);
1770 set_page_attributes(mod->module_init,
1771 mod->module_init + mod->init_ro_size,
1772 set_memory_rw);
1775 /* Iterate through all modules and set each module's text as RW */
1776 void set_all_modules_text_rw(void)
1778 struct module *mod;
1780 mutex_lock(&module_mutex);
1781 list_for_each_entry_rcu(mod, &modules, list) {
1782 if (mod->state == MODULE_STATE_UNFORMED)
1783 continue;
1784 if ((mod->module_core) && (mod->core_text_size)) {
1785 set_page_attributes(mod->module_core,
1786 mod->module_core + mod->core_text_size,
1787 set_memory_rw);
1789 if ((mod->module_init) && (mod->init_text_size)) {
1790 set_page_attributes(mod->module_init,
1791 mod->module_init + mod->init_text_size,
1792 set_memory_rw);
1795 mutex_unlock(&module_mutex);
1798 /* Iterate through all modules and set each module's text as RO */
1799 void set_all_modules_text_ro(void)
1801 struct module *mod;
1803 mutex_lock(&module_mutex);
1804 list_for_each_entry_rcu(mod, &modules, list) {
1805 if (mod->state == MODULE_STATE_UNFORMED)
1806 continue;
1807 if ((mod->module_core) && (mod->core_text_size)) {
1808 set_page_attributes(mod->module_core,
1809 mod->module_core + mod->core_text_size,
1810 set_memory_ro);
1812 if ((mod->module_init) && (mod->init_text_size)) {
1813 set_page_attributes(mod->module_init,
1814 mod->module_init + mod->init_text_size,
1815 set_memory_ro);
1818 mutex_unlock(&module_mutex);
1820 #else
1821 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
1822 static void unset_module_core_ro_nx(struct module *mod) { }
1823 static void unset_module_init_ro_nx(struct module *mod) { }
1824 #endif
1826 void __weak module_free(struct module *mod, void *module_region)
1828 vfree(module_region);
1831 void __weak module_arch_cleanup(struct module *mod)
1835 /* Free a module, remove from lists, etc. */
1836 static void free_module(struct module *mod)
1838 trace_module_free(mod);
1840 mod_sysfs_teardown(mod);
1842 /* We leave it in list to prevent duplicate loads, but make sure
1843 * that no one uses it while it's being deconstructed. */
1844 mutex_lock(&module_mutex);
1845 mod->state = MODULE_STATE_UNFORMED;
1846 mutex_unlock(&module_mutex);
1848 /* Remove dynamic debug info */
1849 ddebug_remove_module(mod->name);
1851 /* Arch-specific cleanup. */
1852 module_arch_cleanup(mod);
1854 /* Module unload stuff */
1855 module_unload_free(mod);
1857 /* Free any allocated parameters. */
1858 destroy_params(mod->kp, mod->num_kp);
1860 /* Now we can delete it from the lists */
1861 mutex_lock(&module_mutex);
1862 stop_machine(__unlink_module, mod, NULL);
1863 mutex_unlock(&module_mutex);
1865 /* This may be NULL, but that's OK */
1866 unset_module_init_ro_nx(mod);
1867 module_free(mod, mod->module_init);
1868 kfree(mod->args);
1869 percpu_modfree(mod);
1871 /* Free lock-classes: */
1872 lockdep_free_key_range(mod->module_core, mod->core_size);
1874 /* Finally, free the core (containing the module structure) */
1875 unset_module_core_ro_nx(mod);
1876 module_free(mod, mod->module_core);
1878 #ifdef CONFIG_MPU
1879 update_protections(current->mm);
1880 #endif
1883 void *__symbol_get(const char *symbol)
1885 struct module *owner;
1886 const struct kernel_symbol *sym;
1888 preempt_disable();
1889 sym = find_symbol(symbol, &owner, NULL, true, true);
1890 if (sym && strong_try_module_get(owner))
1891 sym = NULL;
1892 preempt_enable();
1894 return sym ? (void *)sym->value : NULL;
1896 EXPORT_SYMBOL_GPL(__symbol_get);
1899 * Ensure that an exported symbol [global namespace] does not already exist
1900 * in the kernel or in some other module's exported symbol table.
1902 * You must hold the module_mutex.
1904 static int verify_export_symbols(struct module *mod)
1906 unsigned int i;
1907 struct module *owner;
1908 const struct kernel_symbol *s;
1909 struct {
1910 const struct kernel_symbol *sym;
1911 unsigned int num;
1912 } arr[] = {
1913 { mod->syms, mod->num_syms },
1914 { mod->gpl_syms, mod->num_gpl_syms },
1915 { mod->gpl_future_syms, mod->num_gpl_future_syms },
1916 #ifdef CONFIG_UNUSED_SYMBOLS
1917 { mod->unused_syms, mod->num_unused_syms },
1918 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
1919 #endif
1922 for (i = 0; i < ARRAY_SIZE(arr); i++) {
1923 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
1924 if (find_symbol(s->name, &owner, NULL, true, false)) {
1925 pr_err("%s: exports duplicate symbol %s"
1926 " (owned by %s)\n",
1927 mod->name, s->name, module_name(owner));
1928 return -ENOEXEC;
1932 return 0;
1935 /* Change all symbols so that st_value encodes the pointer directly. */
1936 static int simplify_symbols(struct module *mod, const struct load_info *info)
1938 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
1939 Elf_Sym *sym = (void *)symsec->sh_addr;
1940 unsigned long secbase;
1941 unsigned int i;
1942 int ret = 0;
1943 const struct kernel_symbol *ksym;
1945 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
1946 const char *name = info->strtab + sym[i].st_name;
1948 switch (sym[i].st_shndx) {
1949 case SHN_COMMON:
1950 /* We compiled with -fno-common. These are not
1951 supposed to happen. */
1952 pr_debug("Common symbol: %s\n", name);
1953 printk("%s: please compile with -fno-common\n",
1954 mod->name);
1955 ret = -ENOEXEC;
1956 break;
1958 case SHN_ABS:
1959 /* Don't need to do anything */
1960 pr_debug("Absolute symbol: 0x%08lx\n",
1961 (long)sym[i].st_value);
1962 break;
1964 case SHN_UNDEF:
1965 ksym = resolve_symbol_wait(mod, info, name);
1966 /* Ok if resolved. */
1967 if (ksym && !IS_ERR(ksym)) {
1968 sym[i].st_value = ksym->value;
1969 break;
1972 /* Ok if weak. */
1973 if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
1974 break;
1976 pr_warn("%s: Unknown symbol %s (err %li)\n",
1977 mod->name, name, PTR_ERR(ksym));
1978 ret = PTR_ERR(ksym) ?: -ENOENT;
1979 break;
1981 default:
1982 /* Divert to percpu allocation if a percpu var. */
1983 if (sym[i].st_shndx == info->index.pcpu)
1984 secbase = (unsigned long)mod_percpu(mod);
1985 else
1986 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
1987 sym[i].st_value += secbase;
1988 break;
1992 return ret;
1995 static int apply_relocations(struct module *mod, const struct load_info *info)
1997 unsigned int i;
1998 int err = 0;
2000 /* Now do relocations. */
2001 for (i = 1; i < info->hdr->e_shnum; i++) {
2002 unsigned int infosec = info->sechdrs[i].sh_info;
2004 /* Not a valid relocation section? */
2005 if (infosec >= info->hdr->e_shnum)
2006 continue;
2008 /* Don't bother with non-allocated sections */
2009 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2010 continue;
2012 if (info->sechdrs[i].sh_type == SHT_REL)
2013 err = apply_relocate(info->sechdrs, info->strtab,
2014 info->index.sym, i, mod);
2015 else if (info->sechdrs[i].sh_type == SHT_RELA)
2016 err = apply_relocate_add(info->sechdrs, info->strtab,
2017 info->index.sym, i, mod);
2018 if (err < 0)
2019 break;
2021 return err;
2024 /* Additional bytes needed by arch in front of individual sections */
2025 unsigned int __weak arch_mod_section_prepend(struct module *mod,
2026 unsigned int section)
2028 /* default implementation just returns zero */
2029 return 0;
2032 /* Update size with this section: return offset. */
2033 static long get_offset(struct module *mod, unsigned int *size,
2034 Elf_Shdr *sechdr, unsigned int section)
2036 long ret;
2038 *size += arch_mod_section_prepend(mod, section);
2039 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2040 *size = ret + sechdr->sh_size;
2041 return ret;
2044 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2045 might -- code, read-only data, read-write data, small data. Tally
2046 sizes, and place the offsets into sh_entsize fields: high bit means it
2047 belongs in init. */
2048 static void layout_sections(struct module *mod, struct load_info *info)
2050 static unsigned long const masks[][2] = {
2051 /* NOTE: all executable code must be the first section
2052 * in this array; otherwise modify the text_size
2053 * finder in the two loops below */
2054 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2055 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2056 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2057 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2059 unsigned int m, i;
2061 for (i = 0; i < info->hdr->e_shnum; i++)
2062 info->sechdrs[i].sh_entsize = ~0UL;
2064 pr_debug("Core section allocation order:\n");
2065 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2066 for (i = 0; i < info->hdr->e_shnum; ++i) {
2067 Elf_Shdr *s = &info->sechdrs[i];
2068 const char *sname = info->secstrings + s->sh_name;
2070 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2071 || (s->sh_flags & masks[m][1])
2072 || s->sh_entsize != ~0UL
2073 || strstarts(sname, ".init"))
2074 continue;
2075 s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
2076 pr_debug("\t%s\n", sname);
2078 switch (m) {
2079 case 0: /* executable */
2080 mod->core_size = debug_align(mod->core_size);
2081 mod->core_text_size = mod->core_size;
2082 break;
2083 case 1: /* RO: text and ro-data */
2084 mod->core_size = debug_align(mod->core_size);
2085 mod->core_ro_size = mod->core_size;
2086 break;
2087 case 3: /* whole core */
2088 mod->core_size = debug_align(mod->core_size);
2089 break;
2093 pr_debug("Init section allocation order:\n");
2094 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2095 for (i = 0; i < info->hdr->e_shnum; ++i) {
2096 Elf_Shdr *s = &info->sechdrs[i];
2097 const char *sname = info->secstrings + s->sh_name;
2099 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2100 || (s->sh_flags & masks[m][1])
2101 || s->sh_entsize != ~0UL
2102 || !strstarts(sname, ".init"))
2103 continue;
2104 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
2105 | INIT_OFFSET_MASK);
2106 pr_debug("\t%s\n", sname);
2108 switch (m) {
2109 case 0: /* executable */
2110 mod->init_size = debug_align(mod->init_size);
2111 mod->init_text_size = mod->init_size;
2112 break;
2113 case 1: /* RO: text and ro-data */
2114 mod->init_size = debug_align(mod->init_size);
2115 mod->init_ro_size = mod->init_size;
2116 break;
2117 case 3: /* whole init */
2118 mod->init_size = debug_align(mod->init_size);
2119 break;
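/*
 * Resulting core layout implied by the masks[] passes above (summary added
 * for clarity, not in the original source):
 *
 *   [ executable text ][ read-only data ][ read-write data ][ small data ]
 *   0           core_text_size     core_ro_size                  core_size
 *
 * Each boundary is page-aligned when CONFIG_DEBUG_SET_MODULE_RONX=y so the
 * RO/NX protections applied later by set_section_ro_nx() cover whole pages;
 * the init image is laid out the same way via init_text_size, init_ro_size
 * and init_size.
 */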
2124 static void set_license(struct module *mod, const char *license)
2126 if (!license)
2127 license = "unspecified";
2129 if (!license_is_gpl_compatible(license)) {
2130 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2131 pr_warn("%s: module license '%s' taints kernel.\n",
2132 mod->name, license);
2133 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2134 LOCKDEP_NOW_UNRELIABLE);
2138 /* Parse tag=value strings from .modinfo section */
2139 static char *next_string(char *string, unsigned long *secsize)
2141 /* Skip non-zero chars */
2142 while (string[0]) {
2143 string++;
2144 if ((*secsize)-- <= 1)
2145 return NULL;
2148 /* Skip any zero padding. */
2149 while (!string[0]) {
2150 string++;
2151 if ((*secsize)-- <= 1)
2152 return NULL;
2154 return string;
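/* Return the value of a "tag=value" entry in .modinfo, or NULL if absent. */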
2157 static char *get_modinfo(struct load_info *info, const char *tag)
2159 char *p;
2160 unsigned int taglen = strlen(tag);
2161 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2162 unsigned long size = infosec->sh_size;
2164 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2165 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2166 return p + taglen + 1;
2168 return NULL;
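/* Call each modinfo attribute's setup handler with its .modinfo value
 * (NULL when the tag is not present). */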
2171 static void setup_modinfo(struct module *mod, struct load_info *info)
2173 struct module_attribute *attr;
2174 int i;
2176 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2177 if (attr->setup)
2178 attr->setup(mod, get_modinfo(info, attr->attr.name));
2182 static void free_modinfo(struct module *mod)
2184 struct module_attribute *attr;
2185 int i;
2187 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2188 if (attr->free)
2189 attr->free(mod);
2193 #ifdef CONFIG_KALLSYMS
2195 /* lookup symbol in given range of kernel_symbols */
2196 static const struct kernel_symbol *lookup_symbol(const char *name,
2197 const struct kernel_symbol *start,
2198 const struct kernel_symbol *stop)
2200 return bsearch(name, start, stop - start,
2201 sizeof(struct kernel_symbol), cmp_name);
2204 static int is_exported(const char *name, unsigned long value,
2205 const struct module *mod)
2207 const struct kernel_symbol *ks;
2208 if (!mod)
2209 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2210 else
2211 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2212 return ks != NULL && ks->value == value;
2215 /* As per nm */
2216 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2218 const Elf_Shdr *sechdrs = info->sechdrs;
2220 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2221 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2222 return 'v';
2223 else
2224 return 'w';
2226 if (sym->st_shndx == SHN_UNDEF)
2227 return 'U';
2228 if (sym->st_shndx == SHN_ABS)
2229 return 'a';
2230 if (sym->st_shndx >= SHN_LORESERVE)
2231 return '?';
2232 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2233 return 't';
2234 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2235 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2236 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2237 return 'r';
2238 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2239 return 'g';
2240 else
2241 return 'd';
2243 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2244 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2245 return 's';
2246 else
2247 return 'b';
2249 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2250 ".debug")) {
2251 return 'n';
2253 return '?';
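/* Decide whether a symbol is kept in the permanent (core) symbol table
 * once the init sections have been freed. */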
2256 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2257 unsigned int shnum)
2259 const Elf_Shdr *sec;
2261 if (src->st_shndx == SHN_UNDEF
2262 || src->st_shndx >= shnum
2263 || !src->st_name)
2264 return false;
2266 sec = sechdrs + src->st_shndx;
2267 if (!(sec->sh_flags & SHF_ALLOC)
2268 #ifndef CONFIG_KALLSYMS_ALL
2269 || !(sec->sh_flags & SHF_EXECINSTR)
2270 #endif
2271 || (sec->sh_entsize & INIT_OFFSET_MASK))
2272 return false;
2274 return true;
2278 * We only allocate and copy the strings needed by the parts of symtab
2279 * we keep. This is simple, but has the effect of making multiple
2280 * copies of duplicates. We could be more sophisticated, see
2281 * linux-kernel thread starting with
2282 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2284 static void layout_symtab(struct module *mod, struct load_info *info)
2286 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2287 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2288 const Elf_Sym *src;
2289 unsigned int i, nsrc, ndst, strtab_size = 0;
2291 /* Put symbol section at end of init part of module. */
2292 symsect->sh_flags |= SHF_ALLOC;
2293 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2294 info->index.sym) | INIT_OFFSET_MASK;
2295 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2297 src = (void *)info->hdr + symsect->sh_offset;
2298 nsrc = symsect->sh_size / sizeof(*src);
2300 /* Compute total space required for the core symbols' strtab. */
2301 for (ndst = i = 0; i < nsrc; i++) {
2302 if (i == 0 ||
2303 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2304 strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2305 ndst++;
2309 /* Append room for core symbols at end of core part. */
2310 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2311 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2312 mod->core_size += strtab_size;
2314 /* Put string table section at end of init part of module. */
2315 strsect->sh_flags |= SHF_ALLOC;
2316 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2317 info->index.str) | INIT_OFFSET_MASK;
2318 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
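/* Point mod->symtab/strtab at the moved sections, fill in symbol types,
 * and copy the core symbols and strings into the space reserved by
 * layout_symtab() so they survive freeing of the init region. */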
2321 static void add_kallsyms(struct module *mod, const struct load_info *info)
2323 unsigned int i, ndst;
2324 const Elf_Sym *src;
2325 Elf_Sym *dst;
2326 char *s;
2327 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2329 mod->symtab = (void *)symsec->sh_addr;
2330 mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2331 /* Make sure we get permanent strtab: don't use info->strtab. */
2332 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2334 /* Set types up while we still have access to sections. */
2335 for (i = 0; i < mod->num_symtab; i++)
2336 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2338 mod->core_symtab = dst = mod->module_core + info->symoffs;
2339 mod->core_strtab = s = mod->module_core + info->stroffs;
2340 src = mod->symtab;
2341 for (ndst = i = 0; i < mod->num_symtab; i++) {
2342 if (i == 0 ||
2343 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2344 dst[ndst] = src[i];
2345 dst[ndst++].st_name = s - mod->core_strtab;
2346 s += strlcpy(s, &mod->strtab[src[i].st_name],
2347 KSYM_NAME_LEN) + 1;
2350 mod->core_num_syms = ndst;
2352 #else
2353 static inline void layout_symtab(struct module *mod, struct load_info *info)
2357 static void add_kallsyms(struct module *mod, const struct load_info *info)
2360 #endif /* CONFIG_KALLSYMS */
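/* Register this module's dynamic debug descriptors, if it has any. */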
2362 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2364 if (!debug)
2365 return;
2366 #ifdef CONFIG_DYNAMIC_DEBUG
2367 if (ddebug_add_module(debug, num, debug->modname))
2368 pr_err("dynamic debug error adding module: %s\n",
2369 debug->modname);
2370 #endif
2373 static void dynamic_debug_remove(struct _ddebug *debug)
2375 if (debug)
2376 ddebug_remove_module(debug->modname);
2379 void * __weak module_alloc(unsigned long size)
2381 return vmalloc_exec(size);
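/* Allocate module memory and widen module_addr_min/module_addr_max
 * to cover the new allocation. */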
2384 static void *module_alloc_update_bounds(unsigned long size)
2386 void *ret = module_alloc(size);
2388 if (ret) {
2389 mutex_lock(&module_mutex);
2390 /* Update module bounds. */
2391 if ((unsigned long)ret < module_addr_min)
2392 module_addr_min = (unsigned long)ret;
2393 if ((unsigned long)ret + size > module_addr_max)
2394 module_addr_max = (unsigned long)ret + size;
2395 mutex_unlock(&module_mutex);
2397 return ret;
2400 #ifdef CONFIG_DEBUG_KMEMLEAK
2401 static void kmemleak_load_module(const struct module *mod,
2402 const struct load_info *info)
2404 unsigned int i;
2406 /* only scan the sections containing data */
2407 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2409 for (i = 1; i < info->hdr->e_shnum; i++) {
2410 /* Scan all writable sections that are not executable */
2411 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2412 !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2413 (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2414 continue;
2416 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2417 info->sechdrs[i].sh_size, GFP_KERNEL);
2420 #else
2421 static inline void kmemleak_load_module(const struct module *mod,
2422 const struct load_info *info)
2425 #endif
2427 #ifdef CONFIG_MODULE_SIG
2428 static int module_sig_check(struct load_info *info)
2430 int err = -ENOKEY;
2431 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2432 const void *mod = info->hdr;
2434 if (info->len > markerlen &&
2435 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2436 /* We truncate the module to discard the signature */
2437 info->len -= markerlen;
2438 err = mod_verify_sig(mod, &info->len);
2441 if (!err) {
2442 info->sig_ok = true;
2443 return 0;
2446 /* Not having a signature is only an error if we're strict. */
2447 if (err < 0 && fips_enabled)
2448 panic("Module verification failed with error %d in FIPS mode\n",
2449 err);
2450 if (err == -ENOKEY && !sig_enforce)
2451 err = 0;
2453 return err;
2455 #else /* !CONFIG_MODULE_SIG */
2456 static int module_sig_check(struct load_info *info)
2458 return 0;
2460 #endif /* !CONFIG_MODULE_SIG */
2462 /* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2463 static int elf_header_check(struct load_info *info)
2465 if (info->len < sizeof(*(info->hdr)))
2466 return -ENOEXEC;
2468 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2469 || info->hdr->e_type != ET_REL
2470 || !elf_check_arch(info->hdr)
2471 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2472 return -ENOEXEC;
2474 if (info->hdr->e_shoff >= info->len
2475 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2476 info->len - info->hdr->e_shoff))
2477 return -ENOEXEC;
2479 return 0;
2482 /* Sets info->hdr and info->len. */
2483 static int copy_module_from_user(const void __user *umod, unsigned long len,
2484 struct load_info *info)
2486 int err;
2488 info->len = len;
2489 if (info->len < sizeof(*(info->hdr)))
2490 return -ENOEXEC;
2492 err = security_kernel_module_from_file(NULL);
2493 if (err)
2494 return err;
2496 /* Suck in entire file: we'll want most of it. */
2497 info->hdr = vmalloc(info->len);
2498 if (!info->hdr)
2499 return -ENOMEM;
2501 if (copy_from_user(info->hdr, umod, info->len) != 0) {
2502 vfree(info->hdr);
2503 return -EFAULT;
2506 return 0;
2509 /* Sets info->hdr and info->len. */
2510 static int copy_module_from_fd(int fd, struct load_info *info)
2512 struct fd f = fdget(fd);
2513 int err;
2514 struct kstat stat;
2515 loff_t pos;
2516 ssize_t bytes = 0;
2518 if (!f.file)
2519 return -ENOEXEC;
2521 err = security_kernel_module_from_file(f.file);
2522 if (err)
2523 goto out;
2525 err = vfs_getattr(&f.file->f_path, &stat);
2526 if (err)
2527 goto out;
2529 if (stat.size > INT_MAX) {
2530 err = -EFBIG;
2531 goto out;
2534 /* Don't hand 0 to vmalloc, it whines. */
2535 if (stat.size == 0) {
2536 err = -EINVAL;
2537 goto out;
2540 info->hdr = vmalloc(stat.size);
2541 if (!info->hdr) {
2542 err = -ENOMEM;
2543 goto out;
2546 pos = 0;
2547 while (pos < stat.size) {
2548 bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
2549 stat.size - pos);
2550 if (bytes < 0) {
2551 vfree(info->hdr);
2552 err = bytes;
2553 goto out;
2555 if (bytes == 0)
2556 break;
2557 pos += bytes;
2559 info->len = pos;
2561 out:
2562 fdput(f);
2563 return err;
2566 static void free_copy(struct load_info *info)
2568 vfree(info->hdr);
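/* Point each section's sh_addr into the temporary image, reject truncated
 * images, and clear SHF_ALLOC on sections we track but do not keep
 * (.modinfo, __versions, and .exit when module unloading is disabled). */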
2571 static int rewrite_section_headers(struct load_info *info, int flags)
2573 unsigned int i;
2575 /* This should always be true, but let's be sure. */
2576 info->sechdrs[0].sh_addr = 0;
2578 for (i = 1; i < info->hdr->e_shnum; i++) {
2579 Elf_Shdr *shdr = &info->sechdrs[i];
2580 if (shdr->sh_type != SHT_NOBITS
2581 && info->len < shdr->sh_offset + shdr->sh_size) {
2582 pr_err("Module len %lu truncated\n", info->len);
2583 return -ENOEXEC;
2586 /* Point each section's sh_addr at its location in the
2587 temporary image. */
2588 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2590 #ifndef CONFIG_MODULE_UNLOAD
2591 /* Don't load .exit sections */
2592 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2593 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2594 #endif
2597 /* Track but don't keep modinfo and version sections. */
2598 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2599 info->index.vers = 0; /* Pretend no __versions section! */
2600 else
2601 info->index.vers = find_sec(info, "__versions");
2602 info->index.info = find_sec(info, ".modinfo");
2603 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2604 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2605 return 0;
2609 * Set up our basic convenience variables (pointers to section headers,
2610 * find the module section index, etc.), and do some basic section
2611 * verification.
2613 * Return the temporary module pointer (we'll replace it with the final
2614 * one when we move the module sections around).
2616 static struct module *setup_load_info(struct load_info *info, int flags)
2618 unsigned int i;
2619 int err;
2620 struct module *mod;
2622 /* Set up the convenience variables */
2623 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2624 info->secstrings = (void *)info->hdr
2625 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2627 err = rewrite_section_headers(info, flags);
2628 if (err)
2629 return ERR_PTR(err);
2631 /* Find internal symbols and strings. */
2632 for (i = 1; i < info->hdr->e_shnum; i++) {
2633 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2634 info->index.sym = i;
2635 info->index.str = info->sechdrs[i].sh_link;
2636 info->strtab = (char *)info->hdr
2637 + info->sechdrs[info->index.str].sh_offset;
2638 break;
2642 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2643 if (!info->index.mod) {
2644 pr_warn("No module found in object\n");
2645 return ERR_PTR(-ENOEXEC);
2647 /* This is temporary: point mod into copy of data. */
2648 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2650 if (info->index.sym == 0) {
2651 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
2652 return ERR_PTR(-ENOEXEC);
2655 info->index.pcpu = find_pcpusec(info);
2657 /* Check module struct version now, before we try to use module. */
2658 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2659 return ERR_PTR(-ENOEXEC);
2661 return mod;
2664 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2666 const char *modmagic = get_modinfo(info, "vermagic");
2667 int err;
2669 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2670 modmagic = NULL;
2672 /* This is allowed: modprobe --force will invalidate it. */
2673 if (!modmagic) {
2674 err = try_to_force_load(mod, "bad vermagic");
2675 if (err)
2676 return err;
2677 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2678 pr_err("%s: version magic '%s' should be '%s'\n",
2679 mod->name, modmagic, vermagic);
2680 return -ENOEXEC;
2683 if (!get_modinfo(info, "intree"))
2684 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2686 if (get_modinfo(info, "staging")) {
2687 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2688 pr_warn("%s: module is from the staging directory, the quality "
2689 "is unknown, you have been warned.\n", mod->name);
2692 /* Set up license info based on the info section */
2693 set_license(mod, get_modinfo(info, "license"));
2695 return 0;
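/* Locate the optional special sections (parameters, export tables,
 * constructors, tracepoints, exception table, dynamic debug, ...). */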
2698 static int find_module_sections(struct module *mod, struct load_info *info)
2700 mod->kp = section_objs(info, "__param",
2701 sizeof(*mod->kp), &mod->num_kp);
2702 mod->syms = section_objs(info, "__ksymtab",
2703 sizeof(*mod->syms), &mod->num_syms);
2704 mod->crcs = section_addr(info, "__kcrctab");
2705 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2706 sizeof(*mod->gpl_syms),
2707 &mod->num_gpl_syms);
2708 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2709 mod->gpl_future_syms = section_objs(info,
2710 "__ksymtab_gpl_future",
2711 sizeof(*mod->gpl_future_syms),
2712 &mod->num_gpl_future_syms);
2713 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2715 #ifdef CONFIG_UNUSED_SYMBOLS
2716 mod->unused_syms = section_objs(info, "__ksymtab_unused",
2717 sizeof(*mod->unused_syms),
2718 &mod->num_unused_syms);
2719 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2720 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2721 sizeof(*mod->unused_gpl_syms),
2722 &mod->num_unused_gpl_syms);
2723 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2724 #endif
2725 #ifdef CONFIG_CONSTRUCTORS
2726 mod->ctors = section_objs(info, ".ctors",
2727 sizeof(*mod->ctors), &mod->num_ctors);
2728 if (!mod->ctors)
2729 mod->ctors = section_objs(info, ".init_array",
2730 sizeof(*mod->ctors), &mod->num_ctors);
2731 else if (find_sec(info, ".init_array")) {
2733 * This shouldn't happen with same compiler and binutils
2734 * building all parts of the module.
2736 printk(KERN_WARNING "%s: has both .ctors and .init_array.\n",
2737 mod->name);
2738 return -EINVAL;
2740 #endif
2742 #ifdef CONFIG_TRACEPOINTS
2743 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2744 sizeof(*mod->tracepoints_ptrs),
2745 &mod->num_tracepoints);
2746 #endif
2747 #ifdef HAVE_JUMP_LABEL
2748 mod->jump_entries = section_objs(info, "__jump_table",
2749 sizeof(*mod->jump_entries),
2750 &mod->num_jump_entries);
2751 #endif
2752 #ifdef CONFIG_EVENT_TRACING
2753 mod->trace_events = section_objs(info, "_ftrace_events",
2754 sizeof(*mod->trace_events),
2755 &mod->num_trace_events);
2756 #endif
2757 #ifdef CONFIG_TRACING
2758 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2759 sizeof(*mod->trace_bprintk_fmt_start),
2760 &mod->num_trace_bprintk_fmt);
2761 #endif
2762 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2763 /* sechdrs[0].sh_size is always zero */
2764 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2765 sizeof(*mod->ftrace_callsites),
2766 &mod->num_ftrace_callsites);
2767 #endif
2769 mod->extable = section_objs(info, "__ex_table",
2770 sizeof(*mod->extable), &mod->num_exentries);
2772 if (section_addr(info, "__obsparm"))
2773 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
2775 info->debug = section_objs(info, "__verbose",
2776 sizeof(*info->debug), &info->num_debug);
2778 return 0;
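/* Allocate the final core and init regions, copy every SHF_ALLOC section
 * into place and update sh_addr to the new location. */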
2781 static int move_module(struct module *mod, struct load_info *info)
2783 int i;
2784 void *ptr;
2786 /* Do the allocs. */
2787 ptr = module_alloc_update_bounds(mod->core_size);
2789 * The pointer to this block is stored in the module structure
2790 * which is inside the block. Just mark it as not being a
2791 * leak.
2793 kmemleak_not_leak(ptr);
2794 if (!ptr)
2795 return -ENOMEM;
2797 memset(ptr, 0, mod->core_size);
2798 mod->module_core = ptr;
2800 if (mod->init_size) {
2801 ptr = module_alloc_update_bounds(mod->init_size);
2803 * The pointer to this block is stored in the module structure
2804 * which is inside the block. This block doesn't need to be
2805 * scanned as it contains data and code that will be freed
2806 * after the module is initialized.
2808 kmemleak_ignore(ptr);
2809 if (!ptr) {
2810 module_free(mod, mod->module_core);
2811 return -ENOMEM;
2813 memset(ptr, 0, mod->init_size);
2814 mod->module_init = ptr;
2815 } else
2816 mod->module_init = NULL;
2818 /* Transfer each section which specifies SHF_ALLOC */
2819 pr_debug("final section addresses:\n");
2820 for (i = 0; i < info->hdr->e_shnum; i++) {
2821 void *dest;
2822 Elf_Shdr *shdr = &info->sechdrs[i];
2824 if (!(shdr->sh_flags & SHF_ALLOC))
2825 continue;
2827 if (shdr->sh_entsize & INIT_OFFSET_MASK)
2828 dest = mod->module_init
2829 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
2830 else
2831 dest = mod->module_core + shdr->sh_entsize;
2833 if (shdr->sh_type != SHT_NOBITS)
2834 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2835 /* Update sh_addr to point to copy in image. */
2836 shdr->sh_addr = (unsigned long)dest;
2837 pr_debug("\t0x%lx %s\n",
2838 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
2841 return 0;
2844 static int check_module_license_and_versions(struct module *mod)
2847 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2848 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2849 * using GPL-only symbols it needs.
2851 if (strcmp(mod->name, "ndiswrapper") == 0)
2852 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
2854 /* driverloader was caught wrongly pretending to be under GPL */
2855 if (strcmp(mod->name, "driverloader") == 0)
2856 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2857 LOCKDEP_NOW_UNRELIABLE);
2859 /* lve claims to be GPL but upstream won't provide source */
2860 if (strcmp(mod->name, "lve") == 0)
2861 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2862 LOCKDEP_NOW_UNRELIABLE);
2864 #ifdef CONFIG_MODVERSIONS
2865 if ((mod->num_syms && !mod->crcs)
2866 || (mod->num_gpl_syms && !mod->gpl_crcs)
2867 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2868 #ifdef CONFIG_UNUSED_SYMBOLS
2869 || (mod->num_unused_syms && !mod->unused_crcs)
2870 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2871 #endif
2873 return try_to_force_load(mod,
2874 "no versions for exported symbols");
2876 #endif
2877 return 0;
2880 static void flush_module_icache(const struct module *mod)
2882 mm_segment_t old_fs;
2884 /* flush the icache in correct context */
2885 old_fs = get_fs();
2886 set_fs(KERNEL_DS);
2889 * Flush the instruction cache, since we've played with text.
2890 * Do it before processing of module parameters, so the module
2891 * can provide parameter accessor functions of its own.
2893 if (mod->module_init)
2894 flush_icache_range((unsigned long)mod->module_init,
2895 (unsigned long)mod->module_init
2896 + mod->init_size);
2897 flush_icache_range((unsigned long)mod->module_core,
2898 (unsigned long)mod->module_core + mod->core_size);
2900 set_fs(old_fs);
2903 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2904 Elf_Shdr *sechdrs,
2905 char *secstrings,
2906 struct module *mod)
2908 return 0;
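/* Decide the final layout, allocate the core/init regions and copy the
 * module into them; returns the module structure inside the final image. */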
2911 static struct module *layout_and_allocate(struct load_info *info, int flags)
2913 /* Module within temporary copy. */
2914 struct module *mod;
2915 int err;
2917 mod = setup_load_info(info, flags);
2918 if (IS_ERR(mod))
2919 return mod;
2921 err = check_modinfo(mod, info, flags);
2922 if (err)
2923 return ERR_PTR(err);
2925 /* Allow arches to frob section contents and sizes. */
2926 err = module_frob_arch_sections(info->hdr, info->sechdrs,
2927 info->secstrings, mod);
2928 if (err < 0)
2929 return ERR_PTR(err);
2931 /* We will do a special allocation for per-cpu sections later. */
2932 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
2934 /* Determine total sizes, and put offsets in sh_entsize. For now
2935 this is done generically; there don't appear to be any
2936 special cases for the architectures. */
2937 layout_sections(mod, info);
2938 layout_symtab(mod, info);
2940 /* Allocate and move to the final place */
2941 err = move_module(mod, info);
2942 if (err)
2943 return ERR_PTR(err);
2945 /* Module has been copied to its final place now: return it. */
2946 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2947 kmemleak_load_module(mod, info);
2948 return mod;
2951 /* mod is no longer valid after this! */
2952 static void module_deallocate(struct module *mod, struct load_info *info)
2954 percpu_modfree(mod);
2955 module_free(mod, mod->module_init);
2956 module_free(mod, mod->module_core);
2959 int __weak module_finalize(const Elf_Ehdr *hdr,
2960 const Elf_Shdr *sechdrs,
2961 struct module *me)
2963 return 0;
2966 static int post_relocation(struct module *mod, const struct load_info *info)
2968 /* Sort exception table now that relocations are done. */
2969 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2971 /* Copy relocated percpu area over. */
2972 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
2973 info->sechdrs[info->index.pcpu].sh_size);
2975 /* Setup kallsyms-specific fields. */
2976 add_kallsyms(mod, info);
2978 /* Arch-specific module finalizing. */
2979 return module_finalize(info->hdr, info->sechdrs, mod);
2982 /* Is this module of this name done loading? No locks held. */
2983 static bool finished_loading(const char *name)
2985 struct module *mod;
2986 bool ret;
2988 mutex_lock(&module_mutex);
2989 mod = find_module_all(name, strlen(name), true);
2990 ret = !mod || mod->state == MODULE_STATE_LIVE
2991 || mod->state == MODULE_STATE_GOING;
2992 mutex_unlock(&module_mutex);
2994 return ret;
2997 /* Call module constructors. */
2998 static void do_mod_ctors(struct module *mod)
3000 #ifdef CONFIG_CONSTRUCTORS
3001 unsigned long i;
3003 for (i = 0; i < mod->num_ctors; i++)
3004 mod->ctors[i]();
3005 #endif
3008 /* This is where the real work happens */
3009 static int do_init_module(struct module *mod)
3011 int ret = 0;
3014 * We want to find out whether @mod uses async during init. Clear
3015 * PF_USED_ASYNC. async_schedule*() will set it.
3017 current->flags &= ~PF_USED_ASYNC;
3019 blocking_notifier_call_chain(&module_notify_list,
3020 MODULE_STATE_COMING, mod);
3022 /* Set RO and NX regions for core */
3023 set_section_ro_nx(mod->module_core,
3024 mod->core_text_size,
3025 mod->core_ro_size,
3026 mod->core_size);
3028 /* Set RO and NX regions for init */
3029 set_section_ro_nx(mod->module_init,
3030 mod->init_text_size,
3031 mod->init_ro_size,
3032 mod->init_size);
3034 do_mod_ctors(mod);
3035 /* Start the module */
3036 if (mod->init != NULL)
3037 ret = do_one_initcall(mod->init);
3038 if (ret < 0) {
3039 /* Init routine failed: abort. Try to protect us from
3040 buggy refcounters. */
3041 mod->state = MODULE_STATE_GOING;
3042 synchronize_sched();
3043 module_put(mod);
3044 blocking_notifier_call_chain(&module_notify_list,
3045 MODULE_STATE_GOING, mod);
3046 free_module(mod);
3047 wake_up_all(&module_wq);
3048 return ret;
3050 if (ret > 0) {
3051 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3052 "follow 0/-E convention\n"
3053 "%s: loading module anyway...\n",
3054 __func__, mod->name, ret, __func__);
3055 dump_stack();
3058 /* Now it's a first class citizen! */
3059 mod->state = MODULE_STATE_LIVE;
3060 blocking_notifier_call_chain(&module_notify_list,
3061 MODULE_STATE_LIVE, mod);
3064 * We need to finish all async code before the module init sequence
3065 * is done. This has potential to deadlock. For example, a newly
3066 * detected block device can trigger request_module() of the
3067 * default iosched from async probing task. Once userland helper
3068 * reaches here, async_synchronize_full() will wait on the async
3069 * task waiting on request_module() and deadlock.
3071 * This deadlock is avoided by performing async_synchronize_full()
3072 * iff module init queued any async jobs. This isn't a full
3073 * solution as it will deadlock the same if module loading from
3074 * async jobs nests more than once; however, due to the various
3075 * constraints, this hack seems to be the best option for now.
3076 * Please refer to the following thread for details.
3078 * http://thread.gmane.org/gmane.linux.kernel/1420814
3080 if (current->flags & PF_USED_ASYNC)
3081 async_synchronize_full();
3083 mutex_lock(&module_mutex);
3084 /* Drop initial reference. */
3085 module_put(mod);
3086 trim_init_extable(mod);
3087 #ifdef CONFIG_KALLSYMS
3088 mod->num_symtab = mod->core_num_syms;
3089 mod->symtab = mod->core_symtab;
3090 mod->strtab = mod->core_strtab;
3091 #endif
3092 unset_module_init_ro_nx(mod);
3093 module_free(mod, mod->module_init);
3094 mod->module_init = NULL;
3095 mod->init_size = 0;
3096 mod->init_ro_size = 0;
3097 mod->init_text_size = 0;
3098 mutex_unlock(&module_mutex);
3099 wake_up_all(&module_wq);
3101 return 0;
3104 static int may_init_module(void)
3106 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3107 return -EPERM;
3109 return 0;
3113 * We try to place it in the list now to make sure it's unique before
3114 * we dedicate too many resources; in particular, this avoids temporary
3115 * percpu memory exhaustion.
3117 static int add_unformed_module(struct module *mod)
3119 int err;
3120 struct module *old;
3122 mod->state = MODULE_STATE_UNFORMED;
3124 again:
3125 mutex_lock(&module_mutex);
3126 old = find_module_all(mod->name, strlen(mod->name), true);
3127 if (old != NULL) {
3128 if (old->state == MODULE_STATE_COMING
3129 || old->state == MODULE_STATE_UNFORMED) {
3130 /* Wait in case it fails to load. */
3131 mutex_unlock(&module_mutex);
3132 err = wait_event_interruptible(module_wq,
3133 finished_loading(mod->name));
3134 if (err)
3135 goto out_unlocked;
3136 goto again;
3138 err = -EEXIST;
3139 goto out;
3141 list_add_rcu(&mod->list, &modules);
3142 err = 0;
3144 out:
3145 mutex_unlock(&module_mutex);
3146 out_unlocked:
3147 return err;
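/* Under module_mutex: reject duplicate exported symbols, finalize the
 * module's bug table and move it to MODULE_STATE_COMING. */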
3150 static int complete_formation(struct module *mod, struct load_info *info)
3152 int err;
3154 mutex_lock(&module_mutex);
3156 /* Find duplicate symbols (must be called under lock). */
3157 err = verify_export_symbols(mod);
3158 if (err < 0)
3159 goto out;
3161 /* This relies on module_mutex for list integrity. */
3162 module_bug_finalize(info->hdr, info->sechdrs, mod);
3164 /* Mark state as coming so strong_try_module_get() ignores us,
3165 * but kallsyms etc. can see us. */
3166 mod->state = MODULE_STATE_COMING;
3168 out:
3169 mutex_unlock(&module_mutex);
3170 return err;
3173 static int unknown_module_param_cb(char *param, char *val, const char *modname)
3175 /* Check for magic 'dyndbg' arg */
3176 int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3177 if (ret != 0)
3178 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3179 return 0;
3182 /* Allocate and load the module: note that size of section 0 is always
3183 zero, and we rely on this for optional sections. */
3184 static int load_module(struct load_info *info, const char __user *uargs,
3185 int flags)
3187 struct module *mod;
3188 long err;
3190 err = module_sig_check(info);
3191 if (err)
3192 goto free_copy;
3194 err = elf_header_check(info);
3195 if (err)
3196 goto free_copy;
3198 /* Figure out module layout, and allocate all the memory. */
3199 mod = layout_and_allocate(info, flags);
3200 if (IS_ERR(mod)) {
3201 err = PTR_ERR(mod);
3202 goto free_copy;
3205 /* Reserve our place in the list. */
3206 err = add_unformed_module(mod);
3207 if (err)
3208 goto free_module;
3210 #ifdef CONFIG_MODULE_SIG
3211 mod->sig_ok = info->sig_ok;
3212 if (!mod->sig_ok) {
3213 pr_notice_once("%s: module verification failed: signature "
3214 "and/or required key missing - tainting "
3215 "kernel\n", mod->name);
3216 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_STILL_OK);
3218 #endif
3220 /* To avoid stressing percpu allocator, do this once we're unique. */
3221 err = percpu_modalloc(mod, info);
3222 if (err)
3223 goto unlink_mod;
3225 /* Now module is in final location, initialize linked lists, etc. */
3226 err = module_unload_init(mod);
3227 if (err)
3228 goto unlink_mod;
3230 /* Now we've got everything in the final locations, we can
3231 * find optional sections. */
3232 err = find_module_sections(mod, info);
3233 if (err)
3234 goto free_unload;
3236 err = check_module_license_and_versions(mod);
3237 if (err)
3238 goto free_unload;
3240 /* Set up MODINFO_ATTR fields */
3241 setup_modinfo(mod, info);
3243 /* Fix up syms, so that st_value is a pointer to location. */
3244 err = simplify_symbols(mod, info);
3245 if (err < 0)
3246 goto free_modinfo;
3248 err = apply_relocations(mod, info);
3249 if (err < 0)
3250 goto free_modinfo;
3252 err = post_relocation(mod, info);
3253 if (err < 0)
3254 goto free_modinfo;
3256 flush_module_icache(mod);
3258 /* Now copy in args */
3259 mod->args = strndup_user(uargs, ~0UL >> 1);
3260 if (IS_ERR(mod->args)) {
3261 err = PTR_ERR(mod->args);
3262 goto free_arch_cleanup;
3265 dynamic_debug_setup(info->debug, info->num_debug);
3267 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3268 ftrace_module_init(mod);
3270 /* Finally it's fully formed, ready to start executing. */
3271 err = complete_formation(mod, info);
3272 if (err)
3273 goto ddebug_cleanup;
3275 /* Module is ready to execute: parsing args may do that. */
3276 err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3277 -32768, 32767, unknown_module_param_cb);
3278 if (err < 0)
3279 goto bug_cleanup;
3281 /* Link in to sysfs. */
3282 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3283 if (err < 0)
3284 goto bug_cleanup;
3286 /* Get rid of temporary copy. */
3287 free_copy(info);
3289 /* Done! */
3290 trace_module_load(mod);
3292 return do_init_module(mod);
3294 bug_cleanup:
3295 /* module_bug_cleanup needs module_mutex protection */
3296 mutex_lock(&module_mutex);
3297 module_bug_cleanup(mod);
3298 mutex_unlock(&module_mutex);
3299 ddebug_cleanup:
3300 dynamic_debug_remove(info->debug);
3301 synchronize_sched();
3302 kfree(mod->args);
3303 free_arch_cleanup:
3304 module_arch_cleanup(mod);
3305 free_modinfo:
3306 free_modinfo(mod);
3307 free_unload:
3308 module_unload_free(mod);
3309 unlink_mod:
3310 mutex_lock(&module_mutex);
3311 /* Unlink carefully: kallsyms could be walking list. */
3312 list_del_rcu(&mod->list);
3313 wake_up_all(&module_wq);
3314 mutex_unlock(&module_mutex);
3315 free_module:
3316 module_deallocate(mod, info);
3317 free_copy:
3318 free_copy(info);
3319 return err;
3322 SYSCALL_DEFINE3(init_module, void __user *, umod,
3323 unsigned long, len, const char __user *, uargs)
3325 int err;
3326 struct load_info info = { };
3328 err = may_init_module();
3329 if (err)
3330 return err;
3332 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3333 umod, len, uargs);
3335 err = copy_module_from_user(umod, len, &info);
3336 if (err)
3337 return err;
3339 return load_module(&info, uargs, 0);
3342 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3344 int err;
3345 struct load_info info = { };
3347 err = may_init_module();
3348 if (err)
3349 return err;
3351 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3353 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3354 |MODULE_INIT_IGNORE_VERMAGIC))
3355 return -EINVAL;
3357 err = copy_module_from_fd(fd, &info);
3358 if (err)
3359 return err;
3361 return load_module(&info, uargs, flags);
3364 static inline int within(unsigned long addr, void *start, unsigned long size)
3366 return ((void *)addr >= start && (void *)addr < start + size);
3369 #ifdef CONFIG_KALLSYMS
3371 * This ignores the intensely annoying "mapping symbols" found
3372 * in ARM ELF files: $a, $t and $d.
3374 static inline int is_arm_mapping_symbol(const char *str)
3376 return str[0] == '$' && strchr("atd", str[1])
3377 && (str[2] == '\0' || str[2] == '.');
3380 static const char *get_ksymbol(struct module *mod,
3381 unsigned long addr,
3382 unsigned long *size,
3383 unsigned long *offset)
3385 unsigned int i, best = 0;
3386 unsigned long nextval;
3388 /* At worst, the next value is at the end of the module */
3389 if (within_module_init(addr, mod))
3390 nextval = (unsigned long)mod->module_init+mod->init_text_size;
3391 else
3392 nextval = (unsigned long)mod->module_core+mod->core_text_size;
3394 /* Scan for closest preceding symbol, and next symbol. (ELF
3395 starts real symbols at 1). */
3396 for (i = 1; i < mod->num_symtab; i++) {
3397 if (mod->symtab[i].st_shndx == SHN_UNDEF)
3398 continue;
3400 /* We ignore unnamed symbols: they're uninformative
3401 * and inserted at a whim. */
3402 if (mod->symtab[i].st_value <= addr
3403 && mod->symtab[i].st_value > mod->symtab[best].st_value
3404 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3405 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3406 best = i;
3407 if (mod->symtab[i].st_value > addr
3408 && mod->symtab[i].st_value < nextval
3409 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3410 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3411 nextval = mod->symtab[i].st_value;
3414 if (!best)
3415 return NULL;
3417 if (size)
3418 *size = nextval - mod->symtab[best].st_value;
3419 if (offset)
3420 *offset = addr - mod->symtab[best].st_value;
3421 return mod->strtab + mod->symtab[best].st_name;
3424 /* For kallsyms to ask for address resolution. NULL means not found. Be careful
3425 * not to lock (to avoid deadlock on oopses); simply disable preemption. */
3426 const char *module_address_lookup(unsigned long addr,
3427 unsigned long *size,
3428 unsigned long *offset,
3429 char **modname,
3430 char *namebuf)
3432 struct module *mod;
3433 const char *ret = NULL;
3435 preempt_disable();
3436 list_for_each_entry_rcu(mod, &modules, list) {
3437 if (mod->state == MODULE_STATE_UNFORMED)
3438 continue;
3439 if (within_module_init(addr, mod) ||
3440 within_module_core(addr, mod)) {
3441 if (modname)
3442 *modname = mod->name;
3443 ret = get_ksymbol(mod, addr, size, offset);
3444 break;
3447 /* Make a copy in here where it's safe */
3448 if (ret) {
3449 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3450 ret = namebuf;
3452 preempt_enable();
3453 return ret;
3456 int lookup_module_symbol_name(unsigned long addr, char *symname)
3458 struct module *mod;
3460 preempt_disable();
3461 list_for_each_entry_rcu(mod, &modules, list) {
3462 if (mod->state == MODULE_STATE_UNFORMED)
3463 continue;
3464 if (within_module_init(addr, mod) ||
3465 within_module_core(addr, mod)) {
3466 const char *sym;
3468 sym = get_ksymbol(mod, addr, NULL, NULL);
3469 if (!sym)
3470 goto out;
3471 strlcpy(symname, sym, KSYM_NAME_LEN);
3472 preempt_enable();
3473 return 0;
3476 out:
3477 preempt_enable();
3478 return -ERANGE;
3481 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3482 unsigned long *offset, char *modname, char *name)
3484 struct module *mod;
3486 preempt_disable();
3487 list_for_each_entry_rcu(mod, &modules, list) {
3488 if (mod->state == MODULE_STATE_UNFORMED)
3489 continue;
3490 if (within_module_init(addr, mod) ||
3491 within_module_core(addr, mod)) {
3492 const char *sym;
3494 sym = get_ksymbol(mod, addr, size, offset);
3495 if (!sym)
3496 goto out;
3497 if (modname)
3498 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3499 if (name)
3500 strlcpy(name, sym, KSYM_NAME_LEN);
3501 preempt_enable();
3502 return 0;
3505 out:
3506 preempt_enable();
3507 return -ERANGE;
3510 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3511 char *name, char *module_name, int *exported)
3513 struct module *mod;
3515 preempt_disable();
3516 list_for_each_entry_rcu(mod, &modules, list) {
3517 if (mod->state == MODULE_STATE_UNFORMED)
3518 continue;
3519 if (symnum < mod->num_symtab) {
3520 *value = mod->symtab[symnum].st_value;
3521 *type = mod->symtab[symnum].st_info;
3522 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3523 KSYM_NAME_LEN);
3524 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3525 *exported = is_exported(name, *value, mod);
3526 preempt_enable();
3527 return 0;
3529 symnum -= mod->num_symtab;
3531 preempt_enable();
3532 return -ERANGE;
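/* Find a defined symbol by name within one module; returns 0 if absent. */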
3535 static unsigned long mod_find_symname(struct module *mod, const char *name)
3537 unsigned int i;
3539 for (i = 0; i < mod->num_symtab; i++)
3540 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3541 mod->symtab[i].st_info != 'U')
3542 return mod->symtab[i].st_value;
3543 return 0;
3546 /* Look for this name: can be of form module:name. */
3547 unsigned long module_kallsyms_lookup_name(const char *name)
3549 struct module *mod;
3550 char *colon;
3551 unsigned long ret = 0;
3553 /* Don't lock: we're in enough trouble already. */
3554 preempt_disable();
3555 if ((colon = strchr(name, ':')) != NULL) {
3556 if ((mod = find_module_all(name, colon - name, false)) != NULL)
3557 ret = mod_find_symname(mod, colon+1);
3558 } else {
3559 list_for_each_entry_rcu(mod, &modules, list) {
3560 if (mod->state == MODULE_STATE_UNFORMED)
3561 continue;
3562 if ((ret = mod_find_symname(mod, name)) != 0)
3563 break;
3566 preempt_enable();
3567 return ret;
3570 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3571 struct module *, unsigned long),
3572 void *data)
3574 struct module *mod;
3575 unsigned int i;
3576 int ret;
3578 list_for_each_entry(mod, &modules, list) {
3579 if (mod->state == MODULE_STATE_UNFORMED)
3580 continue;
3581 for (i = 0; i < mod->num_symtab; i++) {
3582 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3583 mod, mod->symtab[i].st_value);
3584 if (ret != 0)
3585 return ret;
3588 return 0;
3590 #endif /* CONFIG_KALLSYMS */
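/* Build the taint/state suffix appended to the module name in
 * /proc/modules and in oops listings. */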
3592 static char *module_flags(struct module *mod, char *buf)
3594 int bx = 0;
3596 BUG_ON(mod->state == MODULE_STATE_UNFORMED);
3597 if (mod->taints ||
3598 mod->state == MODULE_STATE_GOING ||
3599 mod->state == MODULE_STATE_COMING) {
3600 buf[bx++] = '(';
3601 bx += module_flags_taint(mod, buf + bx);
3602 /* Show a - for module-is-being-unloaded */
3603 if (mod->state == MODULE_STATE_GOING)
3604 buf[bx++] = '-';
3605 /* Show a + for module-is-being-loaded */
3606 if (mod->state == MODULE_STATE_COMING)
3607 buf[bx++] = '+';
3608 buf[bx++] = ')';
3610 buf[bx] = '\0';
3612 return buf;
3615 #ifdef CONFIG_PROC_FS
3616 /* Called by the /proc file system to return a list of modules. */
3617 static void *m_start(struct seq_file *m, loff_t *pos)
3619 mutex_lock(&module_mutex);
3620 return seq_list_start(&modules, *pos);
3623 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3625 return seq_list_next(p, &modules, pos);
3628 static void m_stop(struct seq_file *m, void *p)
3630 mutex_unlock(&module_mutex);
3633 static int m_show(struct seq_file *m, void *p)
3635 struct module *mod = list_entry(p, struct module, list);
3636 char buf[8];
3638 /* We always ignore unformed modules. */
3639 if (mod->state == MODULE_STATE_UNFORMED)
3640 return 0;
3642 seq_printf(m, "%s %u",
3643 mod->name, mod->init_size + mod->core_size);
3644 print_unload_info(m, mod);
3646 /* Informative for users. */
3647 seq_printf(m, " %s",
3648 mod->state == MODULE_STATE_GOING ? "Unloading":
3649 mod->state == MODULE_STATE_COMING ? "Loading":
3650 "Live");
3651 /* Used by oprofile and other similar tools. */
3652 seq_printf(m, " 0x%pK", mod->module_core);
3654 /* Taints info */
3655 if (mod->taints)
3656 seq_printf(m, " %s", module_flags(mod, buf));
3658 seq_printf(m, "\n");
3659 return 0;
3662 /* Format: modulename size refcount deps address
3664 Where refcount is a number or -, and deps is a comma-separated list
3665 of dependencies or -.
3667 static const struct seq_operations modules_op = {
3668 .start = m_start,
3669 .next = m_next,
3670 .stop = m_stop,
3671 .show = m_show
3674 static int modules_open(struct inode *inode, struct file *file)
3676 return seq_open(file, &modules_op);
3679 static const struct file_operations proc_modules_operations = {
3680 .open = modules_open,
3681 .read = seq_read,
3682 .llseek = seq_lseek,
3683 .release = seq_release,
3686 static int __init proc_modules_init(void)
3688 proc_create("modules", 0, NULL, &proc_modules_operations);
3689 return 0;
3691 module_init(proc_modules_init);
3692 #endif
3694 /* Given an address, look for it in the module exception tables. */
3695 const struct exception_table_entry *search_module_extables(unsigned long addr)
3697 const struct exception_table_entry *e = NULL;
3698 struct module *mod;
3700 preempt_disable();
3701 list_for_each_entry_rcu(mod, &modules, list) {
3702 if (mod->state == MODULE_STATE_UNFORMED)
3703 continue;
3704 if (mod->num_exentries == 0)
3705 continue;
3707 e = search_extable(mod->extable,
3708 mod->extable + mod->num_exentries - 1,
3709 addr);
3710 if (e)
3711 break;
3713 preempt_enable();
3715 /* If we found one, we are currently running inside it, so
3716 the module cannot be unloaded and no refcnt is needed. */
3717 return e;
3721 * is_module_address - is this address inside a module?
3722 * @addr: the address to check.
3724 * See is_module_text_address() if you simply want to see if the address
3725 * is code (not data).
3727 bool is_module_address(unsigned long addr)
3729 bool ret;
3731 preempt_disable();
3732 ret = __module_address(addr) != NULL;
3733 preempt_enable();
3735 return ret;
3739 * __module_address - get the module which contains an address.
3740 * @addr: the address.
3742 * Must be called with preempt disabled or module mutex held so that
3743 * module doesn't get freed during this.
3745 struct module *__module_address(unsigned long addr)
3747 struct module *mod;
3749 if (addr < module_addr_min || addr > module_addr_max)
3750 return NULL;
3752 list_for_each_entry_rcu(mod, &modules, list) {
3753 if (mod->state == MODULE_STATE_UNFORMED)
3754 continue;
3755 if (within_module_core(addr, mod)
3756 || within_module_init(addr, mod))
3757 return mod;
3759 return NULL;
3761 EXPORT_SYMBOL_GPL(__module_address);
3764 * is_module_text_address - is this address inside module code?
3765 * @addr: the address to check.
3767 * See is_module_address() if you simply want to see if the address is
3768 * anywhere in a module. See kernel_text_address() for testing if an
3769 * address corresponds to kernel or module code.
3771 bool is_module_text_address(unsigned long addr)
3773 bool ret;
3775 preempt_disable();
3776 ret = __module_text_address(addr) != NULL;
3777 preempt_enable();
3779 return ret;
3783 * __module_text_address - get the module whose code contains an address.
3784 * @addr: the address.
3786 * Must be called with preempt disabled or module mutex held so that
3787 * module doesn't get freed during this.
3789 struct module *__module_text_address(unsigned long addr)
3791 struct module *mod = __module_address(addr);
3792 if (mod) {
3793 /* Make sure it's within the text section. */
3794 if (!within(addr, mod->module_init, mod->init_text_size)
3795 && !within(addr, mod->module_core, mod->core_text_size))
3796 mod = NULL;
3798 return mod;
3800 EXPORT_SYMBOL_GPL(__module_text_address);
3802 /* Don't grab lock, we're oopsing. */
3803 void print_modules(void)
3805 struct module *mod;
3806 char buf[8];
3808 printk(KERN_DEFAULT "Modules linked in:");
3809 /* Most callers should already have preempt disabled, but make sure */
3810 preempt_disable();
3811 list_for_each_entry_rcu(mod, &modules, list) {
3812 if (mod->state == MODULE_STATE_UNFORMED)
3813 continue;
3814 printk(" %s%s", mod->name, module_flags(mod, buf));
3816 preempt_enable();
3817 if (last_unloaded_module[0])
3818 printk(" [last unloaded: %s]", last_unloaded_module);
3819 printk("\n");
3822 #ifdef CONFIG_MODVERSIONS
3823 /* Generate the signature for all relevant module structures here.
3824 * If these change, we don't want to try to parse the module. */
3825 void module_layout(struct module *mod,
3826 struct modversion_info *ver,
3827 struct kernel_param *kp,
3828 struct kernel_symbol *ks,
3829 struct tracepoint * const *tp)
3832 EXPORT_SYMBOL(module_layout);
3833 #endif