/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif
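/*
 * Example (assuming 4 KiB pages): debug_align(0x1234) rounds up to 0x2000
 * when CONFIG_DEBUG_SET_MODULE_RONX=y, and is left as 0x1234 otherwise.
 */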
/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))
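/*
 * Example: BASE = 0x1000 and SIZE = 0x2000 with 4 KiB pages gives
 * PFN_DOWN(0x2fff) - PFN_DOWN(0x1000) + 1 = 2 - 1 + 1 = 2 pages.
 */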
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete uses stop_machine/add uses RCU list operations.)
 */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */
#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
#else
static bool sig_enforce = false;

static int param_set_bool_enable_only(const char *val,
				      const struct kernel_param *kp)
{
	int err;
	bool test;
	struct kernel_param dummy_kp = *kp;

	dummy_kp.arg = &test;

	err = param_set_bool(val, &dummy_kp);
	if (err)
		return err;

	/* Don't let them unset it once it's set! */
	if (!test && sig_enforce)
		return -EROFS;

	sig_enforce = test;
	return 0;
}

static const struct kernel_param_ops param_ops_bool_enable_only = {
	.flags = KERNEL_PARAM_OPS_FL_NOARG,
	.set = param_set_bool_enable_only,
	.get = param_get_bool,
};
#define param_check_bool_enable_only param_check_bool

module_param(sig_enforce, bool_enable_only, 0644);
#endif	/* !CONFIG_MODULE_SIG_FORCE */
#endif	/* CONFIG_MODULE_SIG */
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);
/* Bounds of module allocation, for speeding up __module_address.
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
#ifdef CONFIG_KALLSYMS
	unsigned long mod_kallsyms_init_off;
#endif
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};
/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}
static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	mod->taints |= (1U << flag);
}
/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);
/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}
/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}
/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
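/*
 * Illustration: with CONFIG_MODVERSIONS=y, symversion(mod->crcs, 3) points
 * at the CRC of the fourth exported symbol (the CRC table is indexed in
 * parallel with the symbol table); without MODVERSIONS it is simply NULL.
 */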
static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}
/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);
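/*
 * Search order, as laid out above: the kernel's own export tables are
 * consulted first, then every loaded module's tables under RCU, with
 * modules still in the UNFORMED state skipped.
 */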
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};
static bool check_symbol(const struct symsearch *syms,
			 struct module *owner,
			 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evalute if this is the right api to use and if "
			"it really is, submit a report the linux kernel "
			"mailinglist together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}
static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;

	a = va;
	b = vb;

	return strcmp(a, b->name);
}
static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}
/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
/* Search for module by name: must hold module_mutex. */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}
static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}
/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * Returns %true if @addr is from module static percpu area.
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}
#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}

static inline void percpu_modfree(struct module *mod)
{
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}

static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}

bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
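/*
 * The two instantiations above expand into the setup/show/test/free hooks
 * for the "version" and "srcversion" modinfo fields, which then surface as
 * read-only (0444) attributes under /sys/module/<name>/.
 */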
static char last_unloaded_module[MODULE_NAME_LEN+1];
#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	mod->refptr = alloc_percpu(struct module_ref);
	if (!mod->refptr)
		return -ENOMEM;

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	raw_cpu_write(mod->refptr->incs, 1);

	return 0;
}
/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}
/*
 * Module a uses b:
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		pr_warn("%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}
/* Module a uses b: caller needs module_mutex() */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);

	free_percpu(mod->refptr);
}
#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
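/*
 * Note: the "force" here is the O_TRUNC bit that userspace passes in the
 * delete_module() flags; using it marks the kernel with TAINT_FORCED_RMMOD.
 */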
struct stopref {
	struct module *mod;
	int flags;
	int *forced;
};

/* Whole machine is stopped with interrupts off when this runs. */
static int __try_stop_module(void *_sref)
{
	struct stopref *sref = _sref;

	/* If it's not unused, quit unless we're forcing. */
	if (module_refcount(sref->mod) != 0) {
		if (!(*sref->forced = try_force_unload(sref->flags)))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	sref->mod->state = MODULE_STATE_GOING;
	return 0;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	struct stopref sref = { mod, flags, forced };

	return stop_machine(__try_stop_module, &sref, NULL);
}
unsigned long module_refcount(struct module *mod)
{
	unsigned long incs = 0, decs = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
	/*
	 * ensure the incs are added up after the decs.
	 * module_put ensures incs are visible before decs with smp_wmb.
	 *
	 * This 2-count scheme avoids the situation where the refcount
	 * for CPU0 is read, then CPU0 increments the module refcount,
	 * then CPU1 drops that refcount, then the refcount for CPU1 is
	 * read. We would record a decrement but not its corresponding
	 * increment so we would see a low count (disaster).
	 *
	 * Rare situation? But module_refcount can be preempted, and we
	 * might be tallying up 4096+ CPUs. So it is not impossible.
	 */
	smp_rmb();
	for_each_possible_cpu(cpu)
		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
	return incs - decs;
}
EXPORT_SYMBOL(module_refcount);
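/*
 * Worked example of the incs/decs split: if CPU0 has incs = 3, decs = 1 and
 * CPU1 has incs = 2, decs = 2, the reported refcount is (3 + 2) - (1 + 2) = 2.
 */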
/* This exists whether we can unload or not */
static void free_module(struct module *mod);
806 SYSCALL_DEFINE2(delete_module
, const char __user
*, name_user
,
810 char name
[MODULE_NAME_LEN
];
813 if (!capable(CAP_SYS_MODULE
) || modules_disabled
)
816 if (strncpy_from_user(name
, name_user
, MODULE_NAME_LEN
-1) < 0)
818 name
[MODULE_NAME_LEN
-1] = '\0';
820 if (mutex_lock_interruptible(&module_mutex
) != 0)
823 mod
= find_module(name
);
829 if (!list_empty(&mod
->source_list
)) {
830 /* Other modules depend on us: get rid of them first. */
835 /* Doing init or already dying? */
836 if (mod
->state
!= MODULE_STATE_LIVE
) {
837 /* FIXME: if (force), slam module count damn the torpedoes */
838 pr_debug("%s already dying\n", mod
->name
);
843 /* If it has an init func, it must have an exit func to unload */
844 if (mod
->init
&& !mod
->exit
) {
845 forced
= try_force_unload(flags
);
847 /* This module can't be removed */
853 /* Stop the machine so refcounts can't move and disable module. */
854 ret
= try_stop_module(mod
, flags
, &forced
);
858 mutex_unlock(&module_mutex
);
859 /* Final destruction now no one is using it. */
860 if (mod
->exit
!= NULL
)
862 blocking_notifier_call_chain(&module_notify_list
,
863 MODULE_STATE_GOING
, mod
);
864 async_synchronize_full();
866 /* Store the name of the last unloaded module for diagnostic purposes */
867 strlcpy(last_unloaded_module
, mod
->name
, sizeof(last_unloaded_module
));
872 mutex_unlock(&module_mutex
);
876 static inline void print_unload_info(struct seq_file
*m
, struct module
*mod
)
878 struct module_use
*use
;
879 int printed_something
= 0;
881 seq_printf(m
, " %lu ", module_refcount(mod
));
883 /* Always include a trailing , so userspace can differentiate
884 between this and the old multi-field proc format. */
885 list_for_each_entry(use
, &mod
->source_list
, source_list
) {
886 printed_something
= 1;
887 seq_printf(m
, "%s,", use
->source
->name
);
890 if (mod
->init
!= NULL
&& mod
->exit
== NULL
) {
891 printed_something
= 1;
892 seq_printf(m
, "[permanent],");
895 if (!printed_something
)
899 void __symbol_put(const char *symbol
)
901 struct module
*owner
;
904 if (!find_symbol(symbol
, &owner
, NULL
, true, false))
909 EXPORT_SYMBOL(__symbol_put
);
911 /* Note this assumes addr is a function, which it currently always is. */
912 void symbol_put_addr(void *addr
)
914 struct module
*modaddr
;
915 unsigned long a
= (unsigned long)dereference_function_descriptor(addr
);
917 if (core_kernel_text(a
))
921 * Even though we hold a reference on the module; we still need to
922 * disable preemption in order to safely traverse the data structure.
925 modaddr
= __module_text_address(a
);
930 EXPORT_SYMBOL_GPL(symbol_put_addr
);
932 static ssize_t
show_refcnt(struct module_attribute
*mattr
,
933 struct module_kobject
*mk
, char *buffer
)
935 return sprintf(buffer
, "%lu\n", module_refcount(mk
->mod
));
938 static struct module_attribute modinfo_refcnt
=
939 __ATTR(refcnt
, 0444, show_refcnt
, NULL
);
941 void __module_get(struct module
*module
)
945 __this_cpu_inc(module
->refptr
->incs
);
946 trace_module_get(module
, _RET_IP_
);
950 EXPORT_SYMBOL(__module_get
);
952 bool try_module_get(struct module
*module
)
959 if (likely(module_is_live(module
))) {
960 __this_cpu_inc(module
->refptr
->incs
);
961 trace_module_get(module
, _RET_IP_
);
969 EXPORT_SYMBOL(try_module_get
);
971 void module_put(struct module
*module
)
975 smp_wmb(); /* see comment in module_refcount */
976 __this_cpu_inc(module
->refptr
->decs
);
978 trace_module_put(module
, _RET_IP_
);
982 EXPORT_SYMBOL(module_put
);
984 #else /* !CONFIG_MODULE_UNLOAD */
985 static inline void print_unload_info(struct seq_file
*m
, struct module
*mod
)
987 /* We don't know the usage count, or what modules are using. */
988 seq_printf(m
, " - -");
991 static inline void module_unload_free(struct module
*mod
)
995 int ref_module(struct module
*a
, struct module
*b
)
997 return strong_try_module_get(b
);
999 EXPORT_SYMBOL_GPL(ref_module
);
1001 static inline int module_unload_init(struct module
*mod
)
1005 #endif /* CONFIG_MODULE_UNLOAD */
1007 static size_t module_flags_taint(struct module
*mod
, char *buf
)
1011 if (mod
->taints
& (1 << TAINT_PROPRIETARY_MODULE
))
1013 if (mod
->taints
& (1 << TAINT_OOT_MODULE
))
1015 if (mod
->taints
& (1 << TAINT_FORCED_MODULE
))
1017 if (mod
->taints
& (1 << TAINT_CRAP
))
1019 if (mod
->taints
& (1 << TAINT_UNSIGNED_MODULE
))
1022 * TAINT_FORCED_RMMOD: could be added.
1023 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
1029 static ssize_t
show_initstate(struct module_attribute
*mattr
,
1030 struct module_kobject
*mk
, char *buffer
)
1032 const char *state
= "unknown";
1034 switch (mk
->mod
->state
) {
1035 case MODULE_STATE_LIVE
:
1038 case MODULE_STATE_COMING
:
1041 case MODULE_STATE_GOING
:
1047 return sprintf(buffer
, "%s\n", state
);
1050 static struct module_attribute modinfo_initstate
=
1051 __ATTR(initstate
, 0444, show_initstate
, NULL
);
1053 static ssize_t
store_uevent(struct module_attribute
*mattr
,
1054 struct module_kobject
*mk
,
1055 const char *buffer
, size_t count
)
1057 enum kobject_action action
;
1059 if (kobject_action_type(buffer
, count
, &action
) == 0)
1060 kobject_uevent(&mk
->kobj
, action
);
1064 struct module_attribute module_uevent
=
1065 __ATTR(uevent
, 0200, NULL
, store_uevent
);
1067 static ssize_t
show_coresize(struct module_attribute
*mattr
,
1068 struct module_kobject
*mk
, char *buffer
)
1070 return sprintf(buffer
, "%u\n", mk
->mod
->core_size
);
1073 static struct module_attribute modinfo_coresize
=
1074 __ATTR(coresize
, 0444, show_coresize
, NULL
);
1076 static ssize_t
show_initsize(struct module_attribute
*mattr
,
1077 struct module_kobject
*mk
, char *buffer
)
1079 return sprintf(buffer
, "%u\n", mk
->mod
->init_size
);
1082 static struct module_attribute modinfo_initsize
=
1083 __ATTR(initsize
, 0444, show_initsize
, NULL
);
1085 static ssize_t
show_taint(struct module_attribute
*mattr
,
1086 struct module_kobject
*mk
, char *buffer
)
1090 l
= module_flags_taint(mk
->mod
, buffer
);
1095 static struct module_attribute modinfo_taint
=
1096 __ATTR(taint
, 0444, show_taint
, NULL
);
1098 static struct module_attribute
*modinfo_attrs
[] = {
1101 &modinfo_srcversion
,
1106 #ifdef CONFIG_MODULE_UNLOAD
1112 static const char vermagic
[] = VERMAGIC_STRING
;
1114 static int try_to_force_load(struct module
*mod
, const char *reason
)
1116 #ifdef CONFIG_MODULE_FORCE_LOAD
1117 if (!test_taint(TAINT_FORCED_MODULE
))
1118 pr_warn("%s: %s: kernel tainted.\n", mod
->name
, reason
);
1119 add_taint_module(mod
, TAINT_FORCED_MODULE
, LOCKDEP_NOW_UNRELIABLE
);
1126 #ifdef CONFIG_MODVERSIONS
1127 /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
1128 static unsigned long maybe_relocated(unsigned long crc
,
1129 const struct module
*crc_owner
)
1131 #ifdef ARCH_RELOCATES_KCRCTAB
1132 if (crc_owner
== NULL
)
1133 return crc
- (unsigned long)reloc_start
;
1138 static int check_version(Elf_Shdr
*sechdrs
,
1139 unsigned int versindex
,
1140 const char *symname
,
1142 const unsigned long *crc
,
1143 const struct module
*crc_owner
)
1145 unsigned int i
, num_versions
;
1146 struct modversion_info
*versions
;
1148 /* Exporting module didn't supply crcs? OK, we're already tainted. */
1152 /* No versions at all? modprobe --force does this. */
1154 return try_to_force_load(mod
, symname
) == 0;
1156 versions
= (void *) sechdrs
[versindex
].sh_addr
;
1157 num_versions
= sechdrs
[versindex
].sh_size
1158 / sizeof(struct modversion_info
);
1160 for (i
= 0; i
< num_versions
; i
++) {
1161 if (strcmp(versions
[i
].name
, symname
) != 0)
1164 if (versions
[i
].crc
== maybe_relocated(*crc
, crc_owner
))
1166 pr_debug("Found checksum %lX vs module %lX\n",
1167 maybe_relocated(*crc
, crc_owner
), versions
[i
].crc
);
1171 pr_warn("%s: no symbol version for %s\n", mod
->name
, symname
);
1175 printk("%s: disagrees about version of symbol %s\n",
1176 mod
->name
, symname
);
1180 static inline int check_modstruct_version(Elf_Shdr
*sechdrs
,
1181 unsigned int versindex
,
1184 const unsigned long *crc
;
1186 /* Since this should be found in kernel (which can't be removed),
1187 * no locking is necessary. */
1188 if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout
), NULL
,
1191 return check_version(sechdrs
, versindex
,
1192 VMLINUX_SYMBOL_STR(module_layout
), mod
, crc
,
1196 /* First part is kernel version, which we ignore if module has crcs. */
1197 static inline int same_magic(const char *amagic
, const char *bmagic
,
1201 amagic
+= strcspn(amagic
, " ");
1202 bmagic
+= strcspn(bmagic
, " ");
1204 return strcmp(amagic
, bmagic
) == 0;
1207 static inline int check_version(Elf_Shdr
*sechdrs
,
1208 unsigned int versindex
,
1209 const char *symname
,
1211 const unsigned long *crc
,
1212 const struct module
*crc_owner
)
1217 static inline int check_modstruct_version(Elf_Shdr
*sechdrs
,
1218 unsigned int versindex
,
1224 static inline int same_magic(const char *amagic
, const char *bmagic
,
1227 return strcmp(amagic
, bmagic
) == 0;
1229 #endif /* CONFIG_MODVERSIONS */
1231 /* Resolve a symbol for this module. I.e. if we find one, record usage. */
1232 static const struct kernel_symbol
*resolve_symbol(struct module
*mod
,
1233 const struct load_info
*info
,
1237 struct module
*owner
;
1238 const struct kernel_symbol
*sym
;
1239 const unsigned long *crc
;
1242 mutex_lock(&module_mutex
);
1243 sym
= find_symbol(name
, &owner
, &crc
,
1244 !(mod
->taints
& (1 << TAINT_PROPRIETARY_MODULE
)), true);
1248 if (!check_version(info
->sechdrs
, info
->index
.vers
, name
, mod
, crc
,
1250 sym
= ERR_PTR(-EINVAL
);
1254 err
= ref_module(mod
, owner
);
1261 /* We must make copy under the lock if we failed to get ref. */
1262 strncpy(ownername
, module_name(owner
), MODULE_NAME_LEN
);
1264 mutex_unlock(&module_mutex
);
1268 static const struct kernel_symbol
*
1269 resolve_symbol_wait(struct module
*mod
,
1270 const struct load_info
*info
,
1273 const struct kernel_symbol
*ksym
;
1274 char owner
[MODULE_NAME_LEN
];
1276 if (wait_event_interruptible_timeout(module_wq
,
1277 !IS_ERR(ksym
= resolve_symbol(mod
, info
, name
, owner
))
1278 || PTR_ERR(ksym
) != -EBUSY
,
1280 pr_warn("%s: gave up waiting for init of module %s.\n",
1287 * /sys/module/foo/sections stuff
1288 * J. Corbet <corbet@lwn.net>
1292 #ifdef CONFIG_KALLSYMS
1293 static inline bool sect_empty(const Elf_Shdr
*sect
)
1295 return !(sect
->sh_flags
& SHF_ALLOC
) || sect
->sh_size
== 0;
1298 struct module_sect_attr
1300 struct module_attribute mattr
;
1302 unsigned long address
;
1305 struct module_sect_attrs
1307 struct attribute_group grp
;
1308 unsigned int nsections
;
1309 struct module_sect_attr attrs
[0];
1312 static ssize_t
module_sect_show(struct module_attribute
*mattr
,
1313 struct module_kobject
*mk
, char *buf
)
1315 struct module_sect_attr
*sattr
=
1316 container_of(mattr
, struct module_sect_attr
, mattr
);
1317 return sprintf(buf
, "0x%pK\n", (void *)sattr
->address
);
1320 static void free_sect_attrs(struct module_sect_attrs
*sect_attrs
)
1322 unsigned int section
;
1324 for (section
= 0; section
< sect_attrs
->nsections
; section
++)
1325 kfree(sect_attrs
->attrs
[section
].name
);
1329 static void add_sect_attrs(struct module
*mod
, const struct load_info
*info
)
1331 unsigned int nloaded
= 0, i
, size
[2];
1332 struct module_sect_attrs
*sect_attrs
;
1333 struct module_sect_attr
*sattr
;
1334 struct attribute
**gattr
;
1336 /* Count loaded sections and allocate structures */
1337 for (i
= 0; i
< info
->hdr
->e_shnum
; i
++)
1338 if (!sect_empty(&info
->sechdrs
[i
]))
1340 size
[0] = ALIGN(sizeof(*sect_attrs
)
1341 + nloaded
* sizeof(sect_attrs
->attrs
[0]),
1342 sizeof(sect_attrs
->grp
.attrs
[0]));
1343 size
[1] = (nloaded
+ 1) * sizeof(sect_attrs
->grp
.attrs
[0]);
1344 sect_attrs
= kzalloc(size
[0] + size
[1], GFP_KERNEL
);
1345 if (sect_attrs
== NULL
)
1348 /* Setup section attributes. */
1349 sect_attrs
->grp
.name
= "sections";
1350 sect_attrs
->grp
.attrs
= (void *)sect_attrs
+ size
[0];
1352 sect_attrs
->nsections
= 0;
1353 sattr
= §_attrs
->attrs
[0];
1354 gattr
= §_attrs
->grp
.attrs
[0];
1355 for (i
= 0; i
< info
->hdr
->e_shnum
; i
++) {
1356 Elf_Shdr
*sec
= &info
->sechdrs
[i
];
1357 if (sect_empty(sec
))
1359 sattr
->address
= sec
->sh_addr
;
1360 sattr
->name
= kstrdup(info
->secstrings
+ sec
->sh_name
,
1362 if (sattr
->name
== NULL
)
1364 sect_attrs
->nsections
++;
1365 sysfs_attr_init(&sattr
->mattr
.attr
);
1366 sattr
->mattr
.show
= module_sect_show
;
1367 sattr
->mattr
.store
= NULL
;
1368 sattr
->mattr
.attr
.name
= sattr
->name
;
1369 sattr
->mattr
.attr
.mode
= S_IRUGO
;
1370 *(gattr
++) = &(sattr
++)->mattr
.attr
;
1374 if (sysfs_create_group(&mod
->mkobj
.kobj
, §_attrs
->grp
))
1377 mod
->sect_attrs
= sect_attrs
;
1380 free_sect_attrs(sect_attrs
);
1383 static void remove_sect_attrs(struct module
*mod
)
1385 if (mod
->sect_attrs
) {
1386 sysfs_remove_group(&mod
->mkobj
.kobj
,
1387 &mod
->sect_attrs
->grp
);
1388 /* We are positive that no one is using any sect attrs
1389 * at this point. Deallocate immediately. */
1390 free_sect_attrs(mod
->sect_attrs
);
1391 mod
->sect_attrs
= NULL
;
1396 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1399 struct module_notes_attrs
{
1400 struct kobject
*dir
;
1402 struct bin_attribute attrs
[0];
1405 static ssize_t
module_notes_read(struct file
*filp
, struct kobject
*kobj
,
1406 struct bin_attribute
*bin_attr
,
1407 char *buf
, loff_t pos
, size_t count
)
1410 * The caller checked the pos and count against our size.
1412 memcpy(buf
, bin_attr
->private + pos
, count
);
1416 static void free_notes_attrs(struct module_notes_attrs
*notes_attrs
,
1419 if (notes_attrs
->dir
) {
1421 sysfs_remove_bin_file(notes_attrs
->dir
,
1422 ¬es_attrs
->attrs
[i
]);
1423 kobject_put(notes_attrs
->dir
);
1428 static void add_notes_attrs(struct module
*mod
, const struct load_info
*info
)
1430 unsigned int notes
, loaded
, i
;
1431 struct module_notes_attrs
*notes_attrs
;
1432 struct bin_attribute
*nattr
;
1434 /* failed to create section attributes, so can't create notes */
1435 if (!mod
->sect_attrs
)
1438 /* Count notes sections and allocate structures. */
1440 for (i
= 0; i
< info
->hdr
->e_shnum
; i
++)
1441 if (!sect_empty(&info
->sechdrs
[i
]) &&
1442 (info
->sechdrs
[i
].sh_type
== SHT_NOTE
))
1448 notes_attrs
= kzalloc(sizeof(*notes_attrs
)
1449 + notes
* sizeof(notes_attrs
->attrs
[0]),
1451 if (notes_attrs
== NULL
)
1454 notes_attrs
->notes
= notes
;
1455 nattr
= ¬es_attrs
->attrs
[0];
1456 for (loaded
= i
= 0; i
< info
->hdr
->e_shnum
; ++i
) {
1457 if (sect_empty(&info
->sechdrs
[i
]))
1459 if (info
->sechdrs
[i
].sh_type
== SHT_NOTE
) {
1460 sysfs_bin_attr_init(nattr
);
1461 nattr
->attr
.name
= mod
->sect_attrs
->attrs
[loaded
].name
;
1462 nattr
->attr
.mode
= S_IRUGO
;
1463 nattr
->size
= info
->sechdrs
[i
].sh_size
;
1464 nattr
->private = (void *) info
->sechdrs
[i
].sh_addr
;
1465 nattr
->read
= module_notes_read
;
1471 notes_attrs
->dir
= kobject_create_and_add("notes", &mod
->mkobj
.kobj
);
1472 if (!notes_attrs
->dir
)
1475 for (i
= 0; i
< notes
; ++i
)
1476 if (sysfs_create_bin_file(notes_attrs
->dir
,
1477 ¬es_attrs
->attrs
[i
]))
1480 mod
->notes_attrs
= notes_attrs
;
1484 free_notes_attrs(notes_attrs
, i
);
1487 static void remove_notes_attrs(struct module
*mod
)
1489 if (mod
->notes_attrs
)
1490 free_notes_attrs(mod
->notes_attrs
, mod
->notes_attrs
->notes
);
1495 static inline void add_sect_attrs(struct module
*mod
,
1496 const struct load_info
*info
)
1500 static inline void remove_sect_attrs(struct module
*mod
)
1504 static inline void add_notes_attrs(struct module
*mod
,
1505 const struct load_info
*info
)
1509 static inline void remove_notes_attrs(struct module
*mod
)
1512 #endif /* CONFIG_KALLSYMS */
1514 static void add_usage_links(struct module
*mod
)
1516 #ifdef CONFIG_MODULE_UNLOAD
1517 struct module_use
*use
;
1520 mutex_lock(&module_mutex
);
1521 list_for_each_entry(use
, &mod
->target_list
, target_list
) {
1522 nowarn
= sysfs_create_link(use
->target
->holders_dir
,
1523 &mod
->mkobj
.kobj
, mod
->name
);
1525 mutex_unlock(&module_mutex
);
1529 static void del_usage_links(struct module
*mod
)
1531 #ifdef CONFIG_MODULE_UNLOAD
1532 struct module_use
*use
;
1534 mutex_lock(&module_mutex
);
1535 list_for_each_entry(use
, &mod
->target_list
, target_list
)
1536 sysfs_remove_link(use
->target
->holders_dir
, mod
->name
);
1537 mutex_unlock(&module_mutex
);
1541 static int module_add_modinfo_attrs(struct module
*mod
)
1543 struct module_attribute
*attr
;
1544 struct module_attribute
*temp_attr
;
1548 mod
->modinfo_attrs
= kzalloc((sizeof(struct module_attribute
) *
1549 (ARRAY_SIZE(modinfo_attrs
) + 1)),
1551 if (!mod
->modinfo_attrs
)
1554 temp_attr
= mod
->modinfo_attrs
;
1555 for (i
= 0; (attr
= modinfo_attrs
[i
]) && !error
; i
++) {
1557 (attr
->test
&& attr
->test(mod
))) {
1558 memcpy(temp_attr
, attr
, sizeof(*temp_attr
));
1559 sysfs_attr_init(&temp_attr
->attr
);
1560 error
= sysfs_create_file(&mod
->mkobj
.kobj
,&temp_attr
->attr
);
1567 static void module_remove_modinfo_attrs(struct module
*mod
)
1569 struct module_attribute
*attr
;
1572 for (i
= 0; (attr
= &mod
->modinfo_attrs
[i
]); i
++) {
1573 /* pick a field to test for end of list */
1574 if (!attr
->attr
.name
)
1576 sysfs_remove_file(&mod
->mkobj
.kobj
,&attr
->attr
);
1580 kfree(mod
->modinfo_attrs
);
1583 static void mod_kobject_put(struct module
*mod
)
1585 DECLARE_COMPLETION_ONSTACK(c
);
1586 mod
->mkobj
.kobj_completion
= &c
;
1587 kobject_put(&mod
->mkobj
.kobj
);
1588 wait_for_completion(&c
);
1591 static int mod_sysfs_init(struct module
*mod
)
1594 struct kobject
*kobj
;
1596 if (!module_sysfs_initialized
) {
1597 pr_err("%s: module sysfs not initialized\n", mod
->name
);
1602 kobj
= kset_find_obj(module_kset
, mod
->name
);
1604 pr_err("%s: module is already loaded\n", mod
->name
);
1610 mod
->mkobj
.mod
= mod
;
1612 memset(&mod
->mkobj
.kobj
, 0, sizeof(mod
->mkobj
.kobj
));
1613 mod
->mkobj
.kobj
.kset
= module_kset
;
1614 err
= kobject_init_and_add(&mod
->mkobj
.kobj
, &module_ktype
, NULL
,
1617 mod_kobject_put(mod
);
1619 /* delay uevent until full sysfs population */
1624 static int mod_sysfs_setup(struct module
*mod
,
1625 const struct load_info
*info
,
1626 struct kernel_param
*kparam
,
1627 unsigned int num_params
)
1631 err
= mod_sysfs_init(mod
);
1635 mod
->holders_dir
= kobject_create_and_add("holders", &mod
->mkobj
.kobj
);
1636 if (!mod
->holders_dir
) {
1641 err
= module_param_sysfs_setup(mod
, kparam
, num_params
);
1643 goto out_unreg_holders
;
1645 err
= module_add_modinfo_attrs(mod
);
1647 goto out_unreg_param
;
1649 add_usage_links(mod
);
1650 add_sect_attrs(mod
, info
);
1651 add_notes_attrs(mod
, info
);
1653 kobject_uevent(&mod
->mkobj
.kobj
, KOBJ_ADD
);
1657 module_param_sysfs_remove(mod
);
1659 kobject_put(mod
->holders_dir
);
1661 mod_kobject_put(mod
);
1666 static void mod_sysfs_fini(struct module
*mod
)
1668 remove_notes_attrs(mod
);
1669 remove_sect_attrs(mod
);
1670 mod_kobject_put(mod
);
1673 #else /* !CONFIG_SYSFS */
1675 static int mod_sysfs_setup(struct module
*mod
,
1676 const struct load_info
*info
,
1677 struct kernel_param
*kparam
,
1678 unsigned int num_params
)
1683 static void mod_sysfs_fini(struct module
*mod
)
1687 static void module_remove_modinfo_attrs(struct module
*mod
)
1691 static void del_usage_links(struct module
*mod
)
1695 #endif /* CONFIG_SYSFS */
1697 static void mod_sysfs_teardown(struct module
*mod
)
1699 del_usage_links(mod
);
1700 module_remove_modinfo_attrs(mod
);
1701 module_param_sysfs_remove(mod
);
1702 kobject_put(mod
->mkobj
.drivers_dir
);
1703 kobject_put(mod
->holders_dir
);
1704 mod_sysfs_fini(mod
);
1708 * unlink the module with the whole machine is stopped with interrupts off
1709 * - this defends against kallsyms not taking locks
1711 static int __unlink_module(void *_mod
)
1713 struct module
*mod
= _mod
;
1714 list_del(&mod
->list
);
1715 module_bug_cleanup(mod
);
1719 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
1721 * LKM RO/NX protection: protect module's text/ro-data
1722 * from modification and any data from execution.
1724 void set_page_attributes(void *start
, void *end
, int (*set
)(unsigned long start
, int num_pages
))
1726 unsigned long begin_pfn
= PFN_DOWN((unsigned long)start
);
1727 unsigned long end_pfn
= PFN_DOWN((unsigned long)end
);
1729 if (end_pfn
> begin_pfn
)
1730 set(begin_pfn
<< PAGE_SHIFT
, end_pfn
- begin_pfn
);
1733 static void set_section_ro_nx(void *base
,
1734 unsigned long text_size
,
1735 unsigned long ro_size
,
1736 unsigned long total_size
)
1738 /* begin and end PFNs of the current subsection */
1739 unsigned long begin_pfn
;
1740 unsigned long end_pfn
;
1743 * Set RO for module text and RO-data:
1744 * - Always protect first page.
1745 * - Do not protect last partial page.
1748 set_page_attributes(base
, base
+ ro_size
, set_memory_ro
);
1751 * Set NX permissions for module data:
1752 * - Do not protect first partial page.
1753 * - Always protect last page.
1755 if (total_size
> text_size
) {
1756 begin_pfn
= PFN_UP((unsigned long)base
+ text_size
);
1757 end_pfn
= PFN_UP((unsigned long)base
+ total_size
);
1758 if (end_pfn
> begin_pfn
)
1759 set_memory_nx(begin_pfn
<< PAGE_SHIFT
, end_pfn
- begin_pfn
);
1763 static void unset_module_core_ro_nx(struct module
*mod
)
1765 set_page_attributes(mod
->module_core
+ mod
->core_text_size
,
1766 mod
->module_core
+ mod
->core_size
,
1768 set_page_attributes(mod
->module_core
,
1769 mod
->module_core
+ mod
->core_ro_size
,
1773 static void unset_module_init_ro_nx(struct module
*mod
)
1775 set_page_attributes(mod
->module_init
+ mod
->init_text_size
,
1776 mod
->module_init
+ mod
->init_size
,
1778 set_page_attributes(mod
->module_init
,
1779 mod
->module_init
+ mod
->init_ro_size
,
1783 /* Iterate through all modules and set each module's text as RW */
1784 void set_all_modules_text_rw(void)
1788 mutex_lock(&module_mutex
);
1789 list_for_each_entry_rcu(mod
, &modules
, list
) {
1790 if (mod
->state
== MODULE_STATE_UNFORMED
)
1792 if ((mod
->module_core
) && (mod
->core_text_size
)) {
1793 set_page_attributes(mod
->module_core
,
1794 mod
->module_core
+ mod
->core_text_size
,
1797 if ((mod
->module_init
) && (mod
->init_text_size
)) {
1798 set_page_attributes(mod
->module_init
,
1799 mod
->module_init
+ mod
->init_text_size
,
1803 mutex_unlock(&module_mutex
);
1806 /* Iterate through all modules and set each module's text as RO */
1807 void set_all_modules_text_ro(void)
1811 mutex_lock(&module_mutex
);
1812 list_for_each_entry_rcu(mod
, &modules
, list
) {
1813 if (mod
->state
== MODULE_STATE_UNFORMED
)
1815 if ((mod
->module_core
) && (mod
->core_text_size
)) {
1816 set_page_attributes(mod
->module_core
,
1817 mod
->module_core
+ mod
->core_text_size
,
1820 if ((mod
->module_init
) && (mod
->init_text_size
)) {
1821 set_page_attributes(mod
->module_init
,
1822 mod
->module_init
+ mod
->init_text_size
,
1826 mutex_unlock(&module_mutex
);
1829 static inline void set_section_ro_nx(void *base
, unsigned long text_size
, unsigned long ro_size
, unsigned long total_size
) { }
1830 static void unset_module_core_ro_nx(struct module
*mod
) { }
1831 static void unset_module_init_ro_nx(struct module
*mod
) { }
1834 void __weak
module_free(struct module
*mod
, void *module_region
)
1836 vfree(module_region
);
1839 void __weak
module_arch_cleanup(struct module
*mod
)
1843 /* Free a module, remove from lists, etc. */
1844 static void free_module(struct module
*mod
)
1846 trace_module_free(mod
);
1848 mod_sysfs_teardown(mod
);
1850 /* We leave it in list to prevent duplicate loads, but make sure
1851 * that noone uses it while it's being deconstructed. */
1852 mutex_lock(&module_mutex
);
1853 mod
->state
= MODULE_STATE_UNFORMED
;
1854 mutex_unlock(&module_mutex
);
1856 /* Remove dynamic debug info */
1857 ddebug_remove_module(mod
->name
);
1859 /* Arch-specific cleanup. */
1860 module_arch_cleanup(mod
);
1862 /* Module unload stuff */
1863 module_unload_free(mod
);
1865 /* Free any allocated parameters. */
1866 destroy_params(mod
->kp
, mod
->num_kp
);
1868 /* Now we can delete it from the lists */
1869 mutex_lock(&module_mutex
);
1870 stop_machine(__unlink_module
, mod
, NULL
);
1871 mutex_unlock(&module_mutex
);
1873 /* This may be NULL, but that's OK */
1874 unset_module_init_ro_nx(mod
);
1875 module_free(mod
, mod
->module_init
);
1877 percpu_modfree(mod
);
1879 /* Free lock-classes: */
1880 lockdep_free_key_range(mod
->module_core
, mod
->core_size
);
1882 /* Finally, free the core (containing the module structure) */
1883 unset_module_core_ro_nx(mod
);
1884 module_free(mod
, mod
->module_core
);
1887 update_protections(current
->mm
);
1891 void *__symbol_get(const char *symbol
)
1893 struct module
*owner
;
1894 const struct kernel_symbol
*sym
;
1897 sym
= find_symbol(symbol
, &owner
, NULL
, true, true);
1898 if (sym
&& strong_try_module_get(owner
))
1902 return sym
? (void *)sym
->value
: NULL
;
1904 EXPORT_SYMBOL_GPL(__symbol_get
);
1907 * Ensure that an exported symbol [global namespace] does not already exist
1908 * in the kernel or in some other module's exported symbol table.
1910 * You must hold the module_mutex.
1912 static int verify_export_symbols(struct module
*mod
)
1915 struct module
*owner
;
1916 const struct kernel_symbol
*s
;
1918 const struct kernel_symbol
*sym
;
1921 { mod
->syms
, mod
->num_syms
},
1922 { mod
->gpl_syms
, mod
->num_gpl_syms
},
1923 { mod
->gpl_future_syms
, mod
->num_gpl_future_syms
},
1924 #ifdef CONFIG_UNUSED_SYMBOLS
1925 { mod
->unused_syms
, mod
->num_unused_syms
},
1926 { mod
->unused_gpl_syms
, mod
->num_unused_gpl_syms
},
1930 for (i
= 0; i
< ARRAY_SIZE(arr
); i
++) {
1931 for (s
= arr
[i
].sym
; s
< arr
[i
].sym
+ arr
[i
].num
; s
++) {
1932 if (find_symbol(s
->name
, &owner
, NULL
, true, false)) {
1933 pr_err("%s: exports duplicate symbol %s"
1935 mod
->name
, s
->name
, module_name(owner
));
1943 /* Change all symbols so that st_value encodes the pointer directly. */
1944 static int simplify_symbols(struct module
*mod
, const struct load_info
*info
)
1946 Elf_Shdr
*symsec
= &info
->sechdrs
[info
->index
.sym
];
1947 Elf_Sym
*sym
= (void *)symsec
->sh_addr
;
1948 unsigned long secbase
;
1951 const struct kernel_symbol
*ksym
;
1953 for (i
= 1; i
< symsec
->sh_size
/ sizeof(Elf_Sym
); i
++) {
1954 const char *name
= info
->strtab
+ sym
[i
].st_name
;
1956 switch (sym
[i
].st_shndx
) {
1958 /* Ignore common symbols */
1959 if (!strncmp(name
, "__gnu_lto", 9))
1962 /* We compiled with -fno-common. These are not
1963 supposed to happen. */
1964 pr_debug("Common symbol: %s\n", name
);
1965 printk("%s: please compile with -fno-common\n",
1971 /* Don't need to do anything */
1972 pr_debug("Absolute symbol: 0x%08lx\n",
1973 (long)sym
[i
].st_value
);
1977 ksym
= resolve_symbol_wait(mod
, info
, name
);
1978 /* Ok if resolved. */
1979 if (ksym
&& !IS_ERR(ksym
)) {
1980 sym
[i
].st_value
= ksym
->value
;
1985 if (!ksym
&& ELF_ST_BIND(sym
[i
].st_info
) == STB_WEAK
)
1988 pr_warn("%s: Unknown symbol %s (err %li)\n",
1989 mod
->name
, name
, PTR_ERR(ksym
));
1990 ret
= PTR_ERR(ksym
) ?: -ENOENT
;
1994 /* Divert to percpu allocation if a percpu var. */
1995 if (sym
[i
].st_shndx
== info
->index
.pcpu
)
1996 secbase
= (unsigned long)mod_percpu(mod
);
1998 secbase
= info
->sechdrs
[sym
[i
].st_shndx
].sh_addr
;
1999 sym
[i
].st_value
+= secbase
;
2007 static int apply_relocations(struct module
*mod
, const struct load_info
*info
)
2012 /* Now do relocations. */
2013 for (i
= 1; i
< info
->hdr
->e_shnum
; i
++) {
2014 unsigned int infosec
= info
->sechdrs
[i
].sh_info
;
2016 /* Not a valid relocation section? */
2017 if (infosec
>= info
->hdr
->e_shnum
)
2020 /* Don't bother with non-allocated sections */
2021 if (!(info
->sechdrs
[infosec
].sh_flags
& SHF_ALLOC
))
2024 if (info
->sechdrs
[i
].sh_type
== SHT_REL
)
2025 err
= apply_relocate(info
->sechdrs
, info
->strtab
,
2026 info
->index
.sym
, i
, mod
);
2027 else if (info
->sechdrs
[i
].sh_type
== SHT_RELA
)
2028 err
= apply_relocate_add(info
->sechdrs
, info
->strtab
,
2029 info
->index
.sym
, i
, mod
);
2036 /* Additional bytes needed by arch in front of individual sections */
2037 unsigned int __weak
arch_mod_section_prepend(struct module
*mod
,
2038 unsigned int section
)
2040 /* default implementation just returns zero */
2044 /* Update size with this section: return offset. */
2045 static long get_offset(struct module
*mod
, unsigned int *size
,
2046 Elf_Shdr
*sechdr
, unsigned int section
)
2050 *size
+= arch_mod_section_prepend(mod
, section
);
2051 ret
= ALIGN(*size
, sechdr
->sh_addralign
?: 1);
2052 *size
= ret
+ sechdr
->sh_size
;
2056 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2057 might -- code, read-only data, read-write data, small data. Tally
2058 sizes, and place the offsets into sh_entsize fields: high bit means it
2060 static void layout_sections(struct module
*mod
, struct load_info
*info
)
2062 static unsigned long const masks
[][2] = {
2063 /* NOTE: all executable code must be the first section
2064 * in this array; otherwise modify the text_size
2065 * finder in the two loops below */
2066 { SHF_EXECINSTR
| SHF_ALLOC
, ARCH_SHF_SMALL
},
2067 { SHF_ALLOC
, SHF_WRITE
| ARCH_SHF_SMALL
},
2068 { SHF_WRITE
| SHF_ALLOC
, ARCH_SHF_SMALL
},
2069 { ARCH_SHF_SMALL
| SHF_ALLOC
, 0 }
2073 for (i
= 0; i
< info
->hdr
->e_shnum
; i
++)
2074 info
->sechdrs
[i
].sh_entsize
= ~0UL;
2076 pr_debug("Core section allocation order:\n");
2077 for (m
= 0; m
< ARRAY_SIZE(masks
); ++m
) {
2078 for (i
= 0; i
< info
->hdr
->e_shnum
; ++i
) {
2079 Elf_Shdr
*s
= &info
->sechdrs
[i
];
2080 const char *sname
= info
->secstrings
+ s
->sh_name
;
2082 if ((s
->sh_flags
& masks
[m
][0]) != masks
[m
][0]
2083 || (s
->sh_flags
& masks
[m
][1])
2084 || s
->sh_entsize
!= ~0UL
2085 || strstarts(sname
, ".init"))
2087 s
->sh_entsize
= get_offset(mod
, &mod
->core_size
, s
, i
);
2088 pr_debug("\t%s\n", sname
);
2091 case 0: /* executable */
2092 mod
->core_size
= debug_align(mod
->core_size
);
2093 mod
->core_text_size
= mod
->core_size
;
2095 case 1: /* RO: text and ro-data */
2096 mod
->core_size
= debug_align(mod
->core_size
);
2097 mod
->core_ro_size
= mod
->core_size
;
2099 case 3: /* whole core */
2100 mod
->core_size
= debug_align(mod
->core_size
);
2105 pr_debug("Init section allocation order:\n");
2106 for (m
= 0; m
< ARRAY_SIZE(masks
); ++m
) {
2107 for (i
= 0; i
< info
->hdr
->e_shnum
; ++i
) {
2108 Elf_Shdr
*s
= &info
->sechdrs
[i
];
2109 const char *sname
= info
->secstrings
+ s
->sh_name
;
2111 if ((s
->sh_flags
& masks
[m
][0]) != masks
[m
][0]
2112 || (s
->sh_flags
& masks
[m
][1])
2113 || s
->sh_entsize
!= ~0UL
2114 || !strstarts(sname
, ".init"))
2116 s
->sh_entsize
= (get_offset(mod
, &mod
->init_size
, s
, i
)
2117 | INIT_OFFSET_MASK
);
2118 pr_debug("\t%s\n", sname
);
2121 case 0: /* executable */
2122 mod
->init_size
= debug_align(mod
->init_size
);
2123 mod
->init_text_size
= mod
->init_size
;
2125 case 1: /* RO: text and ro-data */
2126 mod
->init_size
= debug_align(mod
->init_size
);
2127 mod
->init_ro_size
= mod
->init_size
;
2129 case 3: /* whole init */
2130 mod
->init_size
= debug_align(mod
->init_size
);
2136 static void set_license(struct module
*mod
, const char *license
)
2139 license
= "unspecified";
2141 if (!license_is_gpl_compatible(license
)) {
2142 if (!test_taint(TAINT_PROPRIETARY_MODULE
))
2143 pr_warn("%s: module license '%s' taints kernel.\n",
2144 mod
->name
, license
);
2145 add_taint_module(mod
, TAINT_PROPRIETARY_MODULE
,
2146 LOCKDEP_NOW_UNRELIABLE
);
2150 /* Parse tag=value strings from .modinfo section */
2151 static char *next_string(char *string
, unsigned long *secsize
)
2153 /* Skip non-zero chars */
2156 if ((*secsize
)-- <= 1)
2160 /* Skip any zero padding. */
2161 while (!string
[0]) {
2163 if ((*secsize
)-- <= 1)
2169 static char *get_modinfo(struct load_info
*info
, const char *tag
)
2172 unsigned int taglen
= strlen(tag
);
2173 Elf_Shdr
*infosec
= &info
->sechdrs
[info
->index
.info
];
2174 unsigned long size
= infosec
->sh_size
;
2176 for (p
= (char *)infosec
->sh_addr
; p
; p
= next_string(p
, &size
)) {
2177 if (strncmp(p
, tag
, taglen
) == 0 && p
[taglen
] == '=')
2178 return p
+ taglen
+ 1;
2183 static void setup_modinfo(struct module
*mod
, struct load_info
*info
)
2185 struct module_attribute
*attr
;
2188 for (i
= 0; (attr
= modinfo_attrs
[i
]); i
++) {
2190 attr
->setup(mod
, get_modinfo(info
, attr
->attr
.name
));
2194 static void free_modinfo(struct module
*mod
)
2196 struct module_attribute
*attr
;
2199 for (i
= 0; (attr
= modinfo_attrs
[i
]); i
++) {
2205 #ifdef CONFIG_KALLSYMS
2207 /* lookup symbol in given range of kernel_symbols */
2208 static const struct kernel_symbol
*lookup_symbol(const char *name
,
2209 const struct kernel_symbol
*start
,
2210 const struct kernel_symbol
*stop
)
2212 return bsearch(name
, start
, stop
- start
,
2213 sizeof(struct kernel_symbol
), cmp_name
);
2216 static int is_exported(const char *name
, unsigned long value
,
2217 const struct module
*mod
)
2219 const struct kernel_symbol
*ks
;
2221 ks
= lookup_symbol(name
, __start___ksymtab
, __stop___ksymtab
);
2223 ks
= lookup_symbol(name
, mod
->syms
, mod
->syms
+ mod
->num_syms
);
2224 return ks
!= NULL
&& ks
->value
== value
;
2228 static char elf_type(const Elf_Sym
*sym
, const struct load_info
*info
)
2230 const Elf_Shdr
*sechdrs
= info
->sechdrs
;
2232 if (ELF_ST_BIND(sym
->st_info
) == STB_WEAK
) {
2233 if (ELF_ST_TYPE(sym
->st_info
) == STT_OBJECT
)
2238 if (sym
->st_shndx
== SHN_UNDEF
)
2240 if (sym
->st_shndx
== SHN_ABS
)
2242 if (sym
->st_shndx
>= SHN_LORESERVE
)
2244 if (sechdrs
[sym
->st_shndx
].sh_flags
& SHF_EXECINSTR
)
2246 if (sechdrs
[sym
->st_shndx
].sh_flags
& SHF_ALLOC
2247 && sechdrs
[sym
->st_shndx
].sh_type
!= SHT_NOBITS
) {
2248 if (!(sechdrs
[sym
->st_shndx
].sh_flags
& SHF_WRITE
))
2250 else if (sechdrs
[sym
->st_shndx
].sh_flags
& ARCH_SHF_SMALL
)
2255 if (sechdrs
[sym
->st_shndx
].sh_type
== SHT_NOBITS
) {
2256 if (sechdrs
[sym
->st_shndx
].sh_flags
& ARCH_SHF_SMALL
)
2261 if (strstarts(info
->secstrings
+ sechdrs
[sym
->st_shndx
].sh_name
,
2268 static bool is_core_symbol(const Elf_Sym
*src
, const Elf_Shdr
*sechdrs
,
2271 const Elf_Shdr
*sec
;
2273 if (src
->st_shndx
== SHN_UNDEF
2274 || src
->st_shndx
>= shnum
2278 sec
= sechdrs
+ src
->st_shndx
;
2279 if (!(sec
->sh_flags
& SHF_ALLOC
)
2280 #ifndef CONFIG_KALLSYMS_ALL
2281 || !(sec
->sh_flags
& SHF_EXECINSTR
)
2283 || (sec
->sh_entsize
& INIT_OFFSET_MASK
))
2290 * We only allocate and copy the strings needed by the parts of symtab
2291 * we keep. This is simple, but has the effect of making multiple
2292 * copies of duplicates. We could be more sophisticated, see
2293 * linux-kernel thread starting with
2294 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2296 static void layout_symtab(struct module
*mod
, struct load_info
*info
)
2298 Elf_Shdr
*symsect
= info
->sechdrs
+ info
->index
.sym
;
2299 Elf_Shdr
*strsect
= info
->sechdrs
+ info
->index
.str
;
2301 unsigned int i
, nsrc
, ndst
, strtab_size
= 0;
2303 /* Put symbol section at end of init part of module. */
2304 symsect
->sh_flags
|= SHF_ALLOC
;
2305 symsect
->sh_entsize
= get_offset(mod
, &mod
->init_size
, symsect
,
2306 info
->index
.sym
) | INIT_OFFSET_MASK
;
2307 pr_debug("\t%s\n", info
->secstrings
+ symsect
->sh_name
);
2309 src
= (void *)info
->hdr
+ symsect
->sh_offset
;
2310 nsrc
= symsect
->sh_size
/ sizeof(*src
);
2312 /* Compute total space required for the core symbols' strtab. */
2313 for (ndst
= i
= 0; i
< nsrc
; i
++) {
2315 is_core_symbol(src
+i
, info
->sechdrs
, info
->hdr
->e_shnum
)) {
2316 strtab_size
+= strlen(&info
->strtab
[src
[i
].st_name
])+1;
2321 /* Append room for core symbols at end of core part. */
2322 info
->symoffs
= ALIGN(mod
->core_size
, symsect
->sh_addralign
?: 1);
2323 info
->stroffs
= mod
->core_size
= info
->symoffs
+ ndst
* sizeof(Elf_Sym
);
2324 mod
->core_size
+= strtab_size
;
2326 /* Put string table section at end of init part of module. */
2327 strsect
->sh_flags
|= SHF_ALLOC
;
2328 strsect
->sh_entsize
= get_offset(mod
, &mod
->init_size
, strsect
,
2329 info
->index
.str
) | INIT_OFFSET_MASK
;
2330 pr_debug("\t%s\n", info
->secstrings
+ strsect
->sh_name
);
2332 /* We'll tack temporary mod_kallsyms on the end. */
2333 mod
->init_size
= ALIGN(mod
->init_size
,
2334 __alignof__(struct mod_kallsyms
));
2335 info
->mod_kallsyms_init_off
= mod
->init_size
;
2336 mod
->init_size
+= sizeof(struct mod_kallsyms
);
2337 mod
->init_size
= debug_align(mod
->init_size
);
/*
 * We use the full symtab and strtab which layout_symtab arranged to
 * be appended to the init section.  Later we switch to the cut-down
 * core-only symtab.
 */
static void add_kallsyms(struct module *mod, const struct load_info *info)
{
	unsigned int i, ndst;
	const Elf_Sym *src;
	Elf_Sym *dst;
	char *s;
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];

	/* Set up to point into init section. */
	mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off;

	mod->kallsyms->symtab = (void *)symsec->sh_addr;
	mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
	/* Make sure we get permanent strtab: don't use info->strtab. */
	mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;

	/* Set types up while we still have access to sections. */
	for (i = 0; i < mod->kallsyms->num_symtab; i++)
		mod->kallsyms->symtab[i].st_info
			= elf_type(&mod->kallsyms->symtab[i], info);

	/* Now populate the cut down core kallsyms for after init. */
	mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs;
	mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs;
	src = mod->kallsyms->symtab;
	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
		if (i == 0 ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
			dst[ndst] = src[i];
			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
				     KSYM_NAME_LEN) + 1;
		}
	}
	mod->core_kallsyms.num_symtab = ndst;
}
#else
static inline void layout_symtab(struct module *mod, struct load_info *info)
{
}

static void add_kallsyms(struct module *mod, const struct load_info *info)
{
}
#endif /* CONFIG_KALLSYMS */
static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
{
	if (!debug)
		return;
#ifdef CONFIG_DYNAMIC_DEBUG
	if (ddebug_add_module(debug, num, debug->modname))
		pr_err("dynamic debug error adding module: %s\n",
		       debug->modname);
#endif
}

static void dynamic_debug_remove(struct _ddebug *debug)
{
	if (debug)
		ddebug_remove_module(debug->modname);
}

void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}

static void *module_alloc_update_bounds(unsigned long size)
{
	void *ret = module_alloc(size);

	if (ret) {
		mutex_lock(&module_mutex);
		/* Update module bounds. */
		if ((unsigned long)ret < module_addr_min)
			module_addr_min = (unsigned long)ret;
		if ((unsigned long)ret + size > module_addr_max)
			module_addr_max = (unsigned long)ret + size;
		mutex_unlock(&module_mutex);
	}
	return ret;
}
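/*
 * Example (illustrative addresses): after loading two modules whose
 * allocations landed at [0xffffffffa0000000, +0x5000) and
 * [0xffffffffa0010000, +0x3000), module_addr_min/module_addr_max span
 * 0xffffffffa0000000..0xffffffffa0013000.  __module_address() further
 * down can then reject most addresses with a single range check before
 * walking the module list at all.
 */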
#ifdef CONFIG_DEBUG_KMEMLEAK
static void kmemleak_load_module(const struct module *mod,
				 const struct load_info *info)
{
	unsigned int i;

	/* only scan the sections containing data */
	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);

	for (i = 1; i < info->hdr->e_shnum; i++) {
		/* Scan all writable sections that are not executable */
		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
		    !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
		    (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
				   info->sechdrs[i].sh_size, GFP_KERNEL);
	}
}
#else
static inline void kmemleak_load_module(const struct module *mod,
					const struct load_info *info)
{
}
#endif
#ifdef CONFIG_MODULE_SIG
static int module_sig_check(struct load_info *info)
{
	int err = -ENOKEY;
	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
	const void *mod = info->hdr;

	if (info->len > markerlen &&
	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
		/* We truncate the module to discard the signature */
		info->len -= markerlen;
		err = mod_verify_sig(mod, &info->len);
	}

	if (!err) {
		info->sig_ok = true;
		return 0;
	}

	/* Not having a signature is only an error if we're strict. */
	if (err == -ENOKEY && !sig_enforce)
		err = 0;

	return err;
}
#else /* !CONFIG_MODULE_SIG */
static int module_sig_check(struct load_info *info)
{
	return 0;
}
#endif /* !CONFIG_MODULE_SIG */
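/*
 * For reference, the on-disk layout this check expects, sketched only
 * roughly (exact signature format depends on how the module was signed):
 *
 *	[ ELF module image ][ signature blob ][ signature metadata ][ "~Module signature appended~\n" ]
 *
 * module_sig_check() strips the trailing magic string, then hands the
 * rest to mod_verify_sig(), which peels off the signature and shrinks
 * info->len back down to the bare ELF image.
 */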
/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
static int elf_header_check(struct load_info *info)
{
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
	    || info->hdr->e_type != ET_REL
	    || !elf_check_arch(info->hdr)
	    || info->hdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (info->hdr->e_shoff >= info->len
	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
		info->len - info->hdr->e_shoff))
		return -ENOEXEC;

	return 0;
}
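/*
 * Worked example (made-up numbers, 64-bit Elf_Shdr): for a copied image
 * of len = 0x1000 bytes with e_shoff = 0xf00 and e_shnum = 8, the last
 * check computes 8 * 64 = 0x200 > 0x1000 - 0xf00 = 0x100, so the section
 * header table would run past the end of the buffer and the module is
 * rejected with -ENOEXEC before any section is ever dereferenced.
 */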
/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
				 struct load_info *info)
{
	int err;

	info->len = len;
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	err = security_kernel_module_from_file(NULL);
	if (err)
		return err;

	/* Suck in entire file: we'll want most of it. */
	info->hdr = vmalloc(info->len);
	if (!info->hdr)
		return -ENOMEM;

	if (copy_from_user(info->hdr, umod, info->len) != 0) {
		vfree(info->hdr);
		return -EFAULT;
	}

	return 0;
}
/* Sets info->hdr and info->len. */
static int copy_module_from_fd(int fd, struct load_info *info)
{
	struct fd f = fdget(fd);
	int err;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -ENOEXEC;

	err = security_kernel_module_from_file(f.file);
	if (err)
		goto out;

	err = vfs_getattr(&f.file->f_path, &stat);
	if (err)
		goto out;

	if (stat.size > INT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		err = -EINVAL;
		goto out;
	}
	info->hdr = vmalloc(stat.size);
	if (!info->hdr) {
		err = -ENOMEM;
		goto out;
	}

	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(info->hdr);
			err = bytes;
			goto out;
		}
		if (bytes == 0) {
			vfree(info->hdr);
			err = -EFAULT;
			goto out;
		}
		pos += bytes;
	}
	info->len = pos;

out:
	fdput(f);
	return err;
}

static void free_copy(struct load_info *info)
{
	vfree(info->hdr);
}
static int rewrite_section_headers(struct load_info *info, int flags)
{
	unsigned int i;

	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		if (shdr->sh_type != SHT_NOBITS
		    && info->len < shdr->sh_offset + shdr->sh_size) {
			pr_err("Module len %lu truncated\n", info->len);
			return -ENOEXEC;
		}

		/* Mark all sections sh_addr with their address in the
		   temporary image. */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

#ifndef CONFIG_MODULE_UNLOAD
		/* Don't load .exit sections */
		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
	}

	/* Track but don't keep modinfo and version sections. */
	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
		info->index.vers = 0; /* Pretend no __versions section! */
	else
		info->index.vers = find_sec(info, "__versions");
	info->index.info = find_sec(info, ".modinfo");
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	return 0;
}
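/*
 * After rewrite_section_headers() every section's sh_addr is just
 * info->hdr + sh_offset, i.e. it points into the temporary vmalloc'd
 * copy of the ELF file.  A sketch of the lifetime of sh_addr (addresses
 * invented for illustration):
 *
 *	after rewrite_section_headers():  sh_addr = 0xffffc90000401000 (copy)
 *	after move_module():              sh_addr = 0xffffffffa0002000 (final)
 *
 * Everything that runs between those two steps has to be careful about
 * which of the two images it is touching.
 */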
/*
 * Set up our basic convenience variables (pointers to section headers,
 * search for module section index etc), and do some basic section
 * verification.
 *
 * Return the temporary module pointer (we'll replace it with the final
 * one when we move the module sections around).
 */
static struct module *setup_load_info(struct load_info *info, int flags)
{
	unsigned int i;
	int err;
	struct module *mod;

	/* Set up the convenience variables */
	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
	info->secstrings = (void *)info->hdr
		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;

	err = rewrite_section_headers(info, flags);
	if (err)
		return ERR_PTR(err);

	/* Find internal symbols and strings. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			info->index.sym = i;
			info->index.str = info->sechdrs[i].sh_link;
			info->strtab = (char *)info->hdr
				+ info->sechdrs[info->index.str].sh_offset;
			break;
		}
	}

	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
	if (!info->index.mod) {
		pr_warn("No module found in object\n");
		return ERR_PTR(-ENOEXEC);
	}
	/* This is temporary: point mod into copy of data. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;

	if (info->index.sym == 0) {
		pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
		return ERR_PTR(-ENOEXEC);
	}

	info->index.pcpu = find_pcpusec(info);

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
		return ERR_PTR(-ENOEXEC);

	return mod;
}
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
	const char *modmagic = get_modinfo(info, "vermagic");
	int err;

	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
		modmagic = NULL;

	/* This is allowed: modprobe --force will invalidate it. */
	if (!modmagic) {
		err = try_to_force_load(mod, "bad vermagic");
		if (err)
			return err;
	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
		pr_err("%s: version magic '%s' should be '%s'\n",
		       mod->name, modmagic, vermagic);
		return -ENOEXEC;
	}

	if (!get_modinfo(info, "intree"))
		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);

	if (get_modinfo(info, "staging")) {
		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
		pr_warn("%s: module is from the staging directory, the quality "
			"is unknown, you have been warned.\n", mod->name);
	}

	/* Set up license info based on the info section */
	set_license(mod, get_modinfo(info, "license"));

	return 0;
}
static int find_module_sections(struct module *mod, struct load_info *info)
{
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(info, "__kcrctab");
	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
				     sizeof(*mod->gpl_syms),
				     &mod->num_gpl_syms);
	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
	mod->gpl_future_syms = section_objs(info,
					    "__ksymtab_gpl_future",
					    sizeof(*mod->gpl_future_syms),
					    &mod->num_gpl_future_syms);
	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");

#ifdef CONFIG_UNUSED_SYMBOLS
	mod->unused_syms = section_objs(info, "__ksymtab_unused",
					sizeof(*mod->unused_syms),
					&mod->num_unused_syms);
	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
					    sizeof(*mod->unused_gpl_syms),
					    &mod->num_unused_gpl_syms);
	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
#endif
#ifdef CONFIG_CONSTRUCTORS
	mod->ctors = section_objs(info, ".ctors",
				  sizeof(*mod->ctors), &mod->num_ctors);
	if (!mod->ctors)
		mod->ctors = section_objs(info, ".init_array",
					  sizeof(*mod->ctors), &mod->num_ctors);
	else if (find_sec(info, ".init_array")) {
		/*
		 * This shouldn't happen with same compiler and binutils
		 * building all parts of the module.
		 */
		printk(KERN_WARNING "%s: has both .ctors and .init_array.\n",
		       mod->name);
		return -EINVAL;
	}
#endif

#ifdef CONFIG_TRACEPOINTS
	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
					     sizeof(*mod->tracepoints_ptrs),
					     &mod->num_tracepoints);
#endif
#ifdef HAVE_JUMP_LABEL
	mod->jump_entries = section_objs(info, "__jump_table",
					 sizeof(*mod->jump_entries),
					 &mod->num_jump_entries);
#endif
#ifdef CONFIG_EVENT_TRACING
	mod->trace_events = section_objs(info, "_ftrace_events",
					 sizeof(*mod->trace_events),
					 &mod->num_trace_events);
#endif
#ifdef CONFIG_TRACING
	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
					 sizeof(*mod->trace_bprintk_fmt_start),
					 &mod->num_trace_bprintk_fmt);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
	/* sechdrs[0].sh_size is always zero */
	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
					     sizeof(*mod->ftrace_callsites),
					     &mod->num_ftrace_callsites);
#endif

	mod->extable = section_objs(info, "__ex_table",
				    sizeof(*mod->extable), &mod->num_exentries);

	if (section_addr(info, "__obsparm"))
		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);

	info->debug = section_objs(info, "__verbose",
				   sizeof(*info->debug), &info->num_debug);

	return 0;
}
static int move_module(struct module *mod, struct load_info *info)
{
	int i;
	void *ptr;

	/* Do the allocs. */
	ptr = module_alloc_update_bounds(mod->core_size);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. Just mark it as not being a
	 * leak.
	 */
	kmemleak_not_leak(ptr);
	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0, mod->core_size);
	mod->module_core = ptr;

	if (mod->init_size) {
		ptr = module_alloc_update_bounds(mod->init_size);
		/*
		 * The pointer to this block is stored in the module structure
		 * which is inside the block. This block doesn't need to be
		 * scanned as it contains data and code that will be freed
		 * after the module is initialized.
		 */
		kmemleak_ignore(ptr);
		if (!ptr) {
			module_free(mod, mod->module_core);
			return -ENOMEM;
		}
		memset(ptr, 0, mod->init_size);
		mod->module_init = ptr;
	} else
		mod->module_init = NULL;

	/* Transfer each section which specifies SHF_ALLOC */
	pr_debug("final section addresses:\n");
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;

		if (shdr->sh_entsize & INIT_OFFSET_MASK)
			dest = mod->module_init
				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->module_core + shdr->sh_entsize;

		if (shdr->sh_type != SHT_NOBITS)
			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
		/* Update sh_addr to point to copy in image. */
		shdr->sh_addr = (unsigned long)dest;
		pr_debug("\t0x%lx %s\n",
			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
	}

	return 0;
}
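/*
 * A sketch of what the sh_entsize encoding above means (values invented
 * for illustration): get_offset() stored each allocated section's offset
 * in sh_entsize, with the top bit (INIT_OFFSET_MASK) set for sections
 * destined for the init region.  On a 64-bit kernel:
 *
 *	sh_entsize = 0x0000000000001200  ->  dest = module_core + 0x1200
 *	sh_entsize = 0x8000000000000340  ->  dest = module_init + 0x340
 */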
static int check_module_license_and_versions(struct module *mod)
{
	/*
	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
	 * using GPL-only symbols it needs.
	 */
	if (strcmp(mod->name, "ndiswrapper") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);

	/* driverloader was caught wrongly pretending to be under GPL */
	if (strcmp(mod->name, "driverloader") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

	/* lve claims to be GPL but upstream won't provide source */
	if (strcmp(mod->name, "lve") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

#ifdef CONFIG_MODVERSIONS
	if ((mod->num_syms && !mod->crcs)
	    || (mod->num_gpl_syms && !mod->gpl_crcs)
	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
#ifdef CONFIG_UNUSED_SYMBOLS
	    || (mod->num_unused_syms && !mod->unused_crcs)
	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
#endif
		) {
		return try_to_force_load(mod,
					 "no versions for exported symbols");
	}
#endif
	return 0;
}
static void flush_module_icache(const struct module *mod)
{
	mm_segment_t old_fs;

	/* flush the icache in correct context */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	if (mod->module_init)
		flush_icache_range((unsigned long)mod->module_init,
				   (unsigned long)mod->module_init
				   + mod->init_size);
	flush_icache_range((unsigned long)mod->module_core,
			   (unsigned long)mod->module_core + mod->core_size);

	set_fs(old_fs);
}

int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
				     Elf_Shdr *sechdrs,
				     char *secstrings,
				     struct module *mod)
{
	return 0;
}
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
	/* Module within temporary copy. */
	struct module *mod;
	int err;

	mod = setup_load_info(info, flags);
	if (IS_ERR(mod))
		return mod;

	err = check_modinfo(mod, info, flags);
	if (err)
		return ERR_PTR(err);

	/* Allow arches to frob section contents and sizes.  */
	err = module_frob_arch_sections(info->hdr, info->sechdrs,
					info->secstrings, mod);
	if (err < 0)
		return ERR_PTR(err);

	/* We will do a special allocation for per-cpu sections later. */
	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;

	/* Determine total sizes, and put offsets in sh_entsize.  For now
	   this is done generically; there doesn't appear to be any
	   special cases for the architectures. */
	layout_sections(mod, info);
	layout_symtab(mod, info);

	/* Allocate and move to the final place */
	err = move_module(mod, info);
	if (err)
		return ERR_PTR(err);

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	return mod;
}
/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	percpu_modfree(mod);
	module_free(mod, mod->module_init);
	module_free(mod, mod->module_core);
}

int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}
static int post_relocation(struct module *mod, const struct load_info *info)
{
	/* Sort exception table now relocations are done. */
	sort_extable(mod->extable, mod->extable + mod->num_exentries);

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	return module_finalize(info->hdr, info->sechdrs, mod);
}
/* Is this module of this name done loading?  No locks held. */
static bool finished_loading(const char *name)
{
	struct module *mod;
	bool ret;

	mutex_lock(&module_mutex);
	mod = find_module_all(name, strlen(name), true);
	ret = !mod || mod->state == MODULE_STATE_LIVE
		|| mod->state == MODULE_STATE_GOING;
	mutex_unlock(&module_mutex);

	return ret;
}

/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long i;

	for (i = 0; i < mod->num_ctors; i++)
		mod->ctors[i]();
#endif
}
/* This is where the real work happens */
static int do_init_module(struct module *mod)
{
	int ret = 0;

	/*
	 * We want to find out whether @mod uses async during init.  Clear
	 * PF_USED_ASYNC.  async_schedule*() will set it.
	 */
	current->flags &= ~PF_USED_ASYNC;

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0) {
		/* Init routine failed: abort.  Try to protect us from
		   buggy refcounters. */
		mod->state = MODULE_STATE_GOING;
		synchronize_sched();
		module_put(mod);
		blocking_notifier_call_chain(&module_notify_list,
					     MODULE_STATE_GOING, mod);
		free_module(mod);
		wake_up_all(&module_wq);
		return ret;
	}
	if (ret > 0) {
		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
			"follow 0/-E convention\n"
			"%s: loading module anyway...\n",
			__func__, mod->name, ret, __func__);
		dump_stack();
	}

	/* Now it's a first class citizen! */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/*
	 * We need to finish all async code before the module init sequence
	 * is done.  This has potential to deadlock.  For example, a newly
	 * detected block device can trigger request_module() of the
	 * default iosched from async probing task.  Once userland helper
	 * reaches here, async_synchronize_full() will wait on the async
	 * task waiting on request_module() and deadlock.
	 *
	 * This deadlock is avoided by performing async_synchronize_full()
	 * iff module init queued any async jobs.  This isn't a full
	 * solution as it will deadlock the same if module loading from
	 * async jobs nests more than once; however, due to the various
	 * constraints, this hack seems to be the best option for now.
	 * Please refer to the following thread for details.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1420814
	 */
	if (current->flags & PF_USED_ASYNC)
		async_synchronize_full();

	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
	unset_module_init_ro_nx(mod);
	module_free(mod, mod->module_init);
	mod->module_init = NULL;
	mod->init_size = 0;
	mod->init_ro_size = 0;
	mod->init_text_size = 0;
	mutex_unlock(&module_mutex);
	wake_up_all(&module_wq);

	return 0;
}
static int may_init_module(void)
{
	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	return 0;
}

/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources.  In particular, temporary percpu
 * memory exhaustion.
 */
static int add_unformed_module(struct module *mod)
{
	int err;
	struct module *old;

	mod->state = MODULE_STATE_UNFORMED;

again:
	mutex_lock(&module_mutex);
	old = find_module_all(mod->name, strlen(mod->name), true);
	if (old != NULL) {
		if (old->state == MODULE_STATE_COMING
		    || old->state == MODULE_STATE_UNFORMED) {
			/* Wait in case it fails to load. */
			mutex_unlock(&module_mutex);
			err = wait_event_interruptible(module_wq,
					       finished_loading(mod->name));
			if (err)
				goto out_unlocked;
			goto again;
		}
		err = -EEXIST;
		goto out;
	}
	list_add_rcu(&mod->list, &modules);
	err = 0;

out:
	mutex_unlock(&module_mutex);
out_unlocked:
	return err;
}
static int complete_formation(struct module *mod, struct load_info *info)
{
	int err;

	mutex_lock(&module_mutex);

	/* Find duplicate symbols (must be called under lock). */
	err = verify_export_symbols(mod);
	if (err < 0)
		goto out;

	/* This relies on module_mutex for list integrity. */
	module_bug_finalize(info->hdr, info->sechdrs, mod);

	/* Set RO and NX regions for core */
	set_section_ro_nx(mod->module_core,
			  mod->core_text_size,
			  mod->core_ro_size,
			  mod->core_size);

	/* Set RO and NX regions for init */
	set_section_ro_nx(mod->module_init,
			  mod->init_text_size,
			  mod->init_ro_size,
			  mod->init_size);

	/* Mark state as coming so strong_try_module_get() ignores us,
	 * but kallsyms etc. can see us. */
	mod->state = MODULE_STATE_COMING;
	mutex_unlock(&module_mutex);

	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_COMING, mod);
	return 0;

out:
	mutex_unlock(&module_mutex);
	return err;
}

static int unknown_module_param_cb(char *param, char *val, const char *modname)
{
	/* Check for magic 'dyndbg' arg */
	int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
	if (ret != 0)
		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
	return 0;
}
/* Allocate and load the module: note that size of section 0 is always
   zero, and we rely on this for optional sections. */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	long err;
	char *after_dashes;

	err = module_sig_check(info);
	if (err)
		goto free_copy;

	err = elf_header_check(info);
	if (err)
		goto free_copy;

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

#ifdef CONFIG_MODULE_SIG
	mod->sig_ok = info->sig_ok;
	if (!mod->sig_ok) {
		pr_notice_once("%s: module verification failed: signature "
			       "and/or required key missing - tainting "
			       "kernel\n", mod->name);
		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
	}
#endif

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	/* Now we've got everything in the final locations, we can
	 * find optional sections. */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	dynamic_debug_setup(info->debug, info->num_debug);

	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
	ftrace_module_init(mod);

	/* Finally it's fully formed, ready to start executing. */
	err = complete_formation(mod, info);
	if (err)
		goto ddebug_cleanup;

	/* Module is ready to execute: parsing args may do that. */
	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
				  -32768, 32767, unknown_module_param_cb);
	if (IS_ERR(after_dashes)) {
		err = PTR_ERR(after_dashes);
		goto bug_cleanup;
	} else if (after_dashes) {
		pr_warn("%s: parameters '%s' after `--' ignored\n",
			mod->name, after_dashes);
	}

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto bug_cleanup;

	/* Get rid of temporary copy. */
	free_copy(info);

	/* Done! */
	trace_module_load(mod);

	return do_init_module(mod);

 bug_cleanup:
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);

	/* we can't deallocate the module until we clear memory protection */
	unset_module_init_ro_nx(mod);
	unset_module_core_ro_nx(mod);

 ddebug_cleanup:
	dynamic_debug_remove(info->debug);
	synchronize_sched();
	kfree(mod->args);
 free_arch_cleanup:
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	wake_up_all(&module_wq);
	mutex_unlock(&module_mutex);
 free_module:
	module_deallocate(mod, info);
 free_copy:
	free_copy(info);
	return err;
}
SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
	       umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err)
		return err;

	return load_module(&info, uargs, 0);
}

SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC))
		return -EINVAL;

	err = copy_module_from_fd(fd, &info);
	if (err)
		return err;

	return load_module(&info, uargs, flags);
}
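/*
 * Userspace view, as a rough sketch (not part of this file; error
 * handling omitted and the path below is only an example): a minimal
 * loader can hand the kernel an open file descriptor and let
 * copy_module_from_fd() above do the reading:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int fd = open("/lib/modules/example/hello.ko", O_RDONLY);
 *	if (syscall(__NR_finit_module, fd, "", 0) != 0)
 *		perror("finit_module");
 *	close(fd);
 *
 * Passing MODULE_INIT_IGNORE_VERMAGIC or MODULE_INIT_IGNORE_MODVERSIONS
 * as the third argument corresponds to the flag check in the syscall
 * body above.
 */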
static inline int within(unsigned long addr, void *start, unsigned long size)
{
	return ((void *)addr >= start && (void *)addr < start + size);
}

#ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d.
 */
static inline int is_arm_mapping_symbol(const char *str)
{
	if (str[0] == '.' && str[1] == 'L')
		return true;
	return str[0] == '$' && strchr("axtd", str[1])
	       && (str[2] == '\0' || str[2] == '.');
}

static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
{
	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
}
static const char *get_ksymbol(struct module *mod,
			       unsigned long addr,
			       unsigned long *size,
			       unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	/* At worst, next value is at end of module */
	if (within_module_init(addr, mod))
		nextval = (unsigned long)mod->module_init+mod->init_text_size;
	else
		nextval = (unsigned long)mod->module_core+mod->core_text_size;

	/* Scan for closest preceding symbol, and next symbol. (ELF
	   starts real symbols at 1). */
	for (i = 1; i < kallsyms->num_symtab; i++) {
		if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
			continue;

		/* We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim. */
		if (*symname(kallsyms, i) == '\0'
		    || is_arm_mapping_symbol(symname(kallsyms, i)))
			continue;

		if (kallsyms->symtab[i].st_value <= addr
		    && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
			best = i;
		if (kallsyms->symtab[i].st_value > addr
		    && kallsyms->symtab[i].st_value < nextval)
			nextval = kallsyms->symtab[i].st_value;
	}

	if (!best)
		return NULL;

	if (size)
		*size = nextval - kallsyms->symtab[best].st_value;
	if (offset)
		*offset = addr - kallsyms->symtab[best].st_value;
	return symname(kallsyms, best);
}
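/*
 * Worked example (addresses invented): if the closest preceding symbol
 * is my_func at 0xffffffffa0001000, the next symbol starts at
 * 0xffffffffa0001080 and addr is 0xffffffffa0001034, get_ksymbol()
 * returns "my_func" with *size = 0x80 and *offset = 0x34, that is, the
 * familiar "my_func+0x34/0x80 [module]" form seen in oops backtraces.
 */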
/* For kallsyms to ask for address resolution.  NULL means not found.  Careful
 * not to lock to avoid deadlock on oopses, simply disable preemption. */
const char *module_address_lookup(unsigned long addr,
				  unsigned long *size,
				  unsigned long *offset,
				  char **modname,
				  char *namebuf)
{
	struct module *mod;
	const char *ret = NULL;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			if (modname)
				*modname = mod->name;
			ret = get_ksymbol(mod, addr, size, offset);
			break;
		}
	}
	/* Make a copy in here where it's safe */
	if (ret) {
		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
		ret = namebuf;
	}
	preempt_enable();

	return ret;
}
int lookup_module_symbol_name(unsigned long addr, char *symname)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, NULL, NULL);
			if (!sym)
				goto out;
			strlcpy(symname, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
			       unsigned long *offset, char *modname, char *name)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, size, offset);
			if (!sym)
				goto out;
			if (modname)
				strlcpy(modname, mod->name, MODULE_NAME_LEN);
			if (name)
				strlcpy(name, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *name, char *module_name, int *exported)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		struct mod_kallsyms *kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		kallsyms = rcu_dereference_sched(mod->kallsyms);
		if (symnum < kallsyms->num_symtab) {
			*value = kallsyms->symtab[symnum].st_value;
			*type = kallsyms->symtab[symnum].st_info;
			strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
			*exported = is_exported(name, *value, mod);
			preempt_enable();
			return 0;
		}
		symnum -= kallsyms->num_symtab;
	}
	preempt_enable();
	return -ERANGE;
}
static unsigned long mod_find_symname(struct module *mod, const char *name)
{
	unsigned int i;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	for (i = 0; i < kallsyms->num_symtab; i++)
		if (strcmp(name, symname(kallsyms, i)) == 0 &&
		    kallsyms->symtab[i].st_info != 'U')
			return kallsyms->symtab[i].st_value;
	return 0;
}

/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name)
{
	struct module *mod;
	char *colon;
	unsigned long ret = 0;

	/* Don't lock: we're in enough trouble already. */
	preempt_disable();
	if ((colon = strchr(name, ':')) != NULL) {
		if ((mod = find_module_all(name, colon - name, false)) != NULL)
			ret = mod_find_symname(mod, colon+1);
	} else {
		list_for_each_entry_rcu(mod, &modules, list) {
			if (mod->state == MODULE_STATE_UNFORMED)
				continue;
			if ((ret = mod_find_symname(mod, name)) != 0)
				break;
		}
	}
	preempt_enable();
	return ret;
}
int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	list_for_each_entry(mod, &modules, list) {
		/* We hold module_mutex: no need for rcu_dereference_sched */
		struct mod_kallsyms *kallsyms = mod->kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		for (i = 0; i < kallsyms->num_symtab; i++) {
			ret = fn(data, symname(kallsyms, i),
				 mod, kallsyms->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
#endif /* CONFIG_KALLSYMS */
static char *module_flags(struct module *mod, char *buf)
{
	int bx = 0;

	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		bx += module_flags_taint(mod, buf + bx);
		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
	buf[bx] = '\0';

	return buf;
}
#ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}

static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &modules, pos);
}

static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}

static int m_show(struct seq_file *m, void *p)
{
	struct module *mod = list_entry(p, struct module, list);
	char buf[8];

	/* We always ignore unformed modules. */
	if (mod->state == MODULE_STATE_UNFORMED)
		return 0;

	seq_printf(m, "%s %u",
		   mod->name, mod->init_size + mod->core_size);
	print_unload_info(m, mod);

	/* Informative for users. */
	seq_printf(m, " %s",
		   mod->state == MODULE_STATE_GOING ? "Unloading":
		   mod->state == MODULE_STATE_COMING ? "Loading":
		   "Live");
	/* Used by oprofile and other similar tools. */
	seq_printf(m, " 0x%pK", mod->module_core);

	/* Taints info */
	if (mod->taints)
		seq_printf(m, " %s", module_flags(mod, buf));

	seq_printf(m, "\n");
	return 0;
}

/* Format: modulename size refcount deps address

   Where refcount is a number or -, and deps is a comma-separated list
   of depends or -.
*/
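/*
 * Example of one /proc/modules line produced by m_show() above (module
 * names, sizes and dependencies are illustrative; the %pK address is
 * typically shown as zeroes to unprivileged readers):
 *
 *	usbcore 249856 4 uas,usb_storage,ehci_hcd, Live 0x0000000000000000
 */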
static const struct seq_operations modules_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};

static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}

static const struct file_operations proc_modules_operations = {
	.open		= modules_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proc_modules_init(void)
{
	proc_create("modules", 0, NULL, &proc_modules_operations);
	return 0;
}
module_init(proc_modules_init);
#endif
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (mod->num_exentries == 0)
			continue;

		e = search_extable(mod->extable,
				   mod->extable + mod->num_exentries - 1,
				   addr);
		if (e)
			break;
	}
	preempt_enable();

	/* Now, if we found one, we are running inside it now, hence
	   we cannot unload the module, hence no refcnt needed. */
	return e;
}

/*
 * is_module_address - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_address(addr) != NULL;
	preempt_enable();

	return ret;
}
/*
 * __module_address - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod))
			return mod;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__module_address);

/*
 * is_module_text_address - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module.  See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_text_address(addr) != NULL;
	preempt_enable();

	return ret;
}
/*
 * __module_text_address - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod = __module_address(addr);
	if (mod) {
		/* Make sure it's within the text section. */
		if (!within(addr, mod->module_init, mod->init_text_size)
		    && !within(addr, mod->module_core, mod->core_text_size))
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_text_address);

/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
	struct module *mod;
	char buf[8];

	printk(KERN_DEFAULT "Modules linked in:");
	/* Most callers should already have preempt disabled, but make sure */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
	}
	preempt_enable();
	if (last_unloaded_module[0])
		pr_cont(" [last unloaded: %s]", last_unloaded_module);
	pr_cont("\n");
}
#ifdef CONFIG_MODVERSIONS
/* Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module. */
void module_layout(struct module *mod,
		   struct modversion_info *ver,
		   struct kernel_param *kp,
		   struct kernel_symbol *ks,
		   struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
#endif