// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 */

#define INCLUDE_VERMAGIC

#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/module_signature.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <linux/cfi.h>
#include <linux/codetag.h>
#include <linux/debugfs.h>
#include <linux/execmem.h>
#include <uapi/linux/module.h>

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
/*
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) mod_tree.addr_min/mod_tree.addr_max.
 * (delete and add uses RCU list operations).
 */
DEFINE_MUTEX(module_mutex);

/* Work queue for freeing init sections in success case */
static void do_free_init(struct work_struct *w);
static DECLARE_WORK(init_free_wq, do_free_init);
static LLIST_HEAD(init_free_list);

struct mod_tree_root mod_tree __cacheline_aligned = {

	const struct kernel_symbol *start, *stop;
	enum mod_license license;

/*
 * Bounds of module memory, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base,
				unsigned int size, struct mod_tree_root *tree)
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	if (mod_mem_type_is_core_data(type)) {
		if (min < tree->data_addr_min)
			tree->data_addr_min = min;
		if (max > tree->data_addr_max)
			tree->data_addr_max = max;

	if (min < tree->addr_min)
		tree->addr_min = min;
	if (max > tree->addr_max)
		tree->addr_max = max;

static void mod_update_bounds(struct module *mod)
	for_each_mod_mem_type(type) {
		struct module_memory *mod_mem = &mod->mem[type];

		__mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree);

/* Block module loading/unloading? */
int modules_disabled;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
	return blocking_notifier_chain_register(&module_notify_list, nb);
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
EXPORT_SYMBOL(unregister_module_notifier);
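
/*
 * Illustrative usage sketch (not part of this file): a subsystem that wants
 * to react to module state changes registers a notifier_block and receives
 * the MODULE_STATE_* action together with the struct module pointer as data.
 * The callback and variable names below are hypothetical.
 *
 *	static int example_module_cb(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		struct module *mod = data;
 *
 *		if (action == MODULE_STATE_COMING)
 *			pr_info("module %s is coming\n", mod->name);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_module_nb = {
 *		.notifier_call = example_module_cb,
 *	};
 *
 *	register_module_notifier(&example_module_nb);
 */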
/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
	if (try_module_get(mod))

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
	add_taint(flag, lockdep_ok);
	set_bit(flag, &mod->taints);

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.
 */
void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
EXPORT_SYMBOL(__module_put_and_kthread_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)

/**
 * find_any_unique_sec() - Find a unique section index by name
 * @info: Load info for the module to scan
 * @name: Name of the section we're looking for
 *
 * Locates a unique section by name. Ignores SHF_ALLOC.
 *
 * Return: Section index if found uniquely, zero if absent, negative count
 * of total instances if multiple were found.
 */
static int find_any_unique_sec(const struct load_info *info, const char *name)
	unsigned int count = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (strcmp(info->secstrings + info->sechdrs[i].sh_name,
	} else if (count == 0) {

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;

/* Find a module section, or NULL. Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;

/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
static unsigned int find_any_sec(const struct load_info *info, const char *name)
	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		if (strcmp(info->secstrings + shdr->sh_name, name) == 0)

/*
 * Find a module section, or NULL. Fill in number of "objects" in section.
 * Ignores SHF_ALLOC flag.
 */
static __maybe_unused void *any_section_objs(const struct load_info *info,
	unsigned int sec = find_any_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
static const char *kernel_symbol_name(const struct kernel_symbol *sym)
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return offset_to_ptr(&sym->name_offset);

static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	if (!sym->namespace_offset)
	return offset_to_ptr(&sym->namespace_offset);
	return sym->namespace;

int cmp_name(const void *name, const void *sym)
	return strcmp(name, kernel_symbol_name(sym));

static bool find_exported_symbol_in_section(const struct symsearch *syms,
					    struct module *owner,
					    struct find_symbol_arg *fsa)
	struct kernel_symbol *sym;

	if (!fsa->gplok && syms->license == GPL_ONLY)

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
		      sizeof(struct kernel_symbol), cmp_name);

	fsa->crc = symversion(syms->crcs, sym - syms->start);
	fsa->license = syms->license;

/*
 * Find an exported symbol and return it, along with, (optional) crc and
 * (optional) module which owns it. Needs preempt disabled or module_mutex.
 */
bool find_symbol(struct find_symbol_arg *fsa)
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,

	module_assert_mutex_or_preempt();

	for (i = 0; i < ARRAY_SIZE(arr); i++)
		if (find_exported_symbol_in_section(&arr[i], NULL, fsa))

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,

		if (mod->state == MODULE_STATE_UNFORMED)

		for (i = 0; i < ARRAY_SIZE(arr); i++)
			if (find_exported_symbol_in_section(&arr[i], mod, fsa))

	pr_debug("Failed to find symbol %s\n", fsa->name);

/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
struct module *find_module_all(const char *name, size_t len,
	module_assert_mutex_or_preempt();

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))

struct module *find_module(const char *name)
	return find_module_all(name, strlen(name), false);
static inline void __percpu *mod_percpu(struct module *mod)

static int percpu_modalloc(struct module *mod, struct load_info *info)
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
	mod->percpu_size = pcpusec->sh_size;

static void percpu_modfree(struct module *mod)
	free_percpu(mod->percpu);

static unsigned int find_pcpusec(struct load_info *info)
	return find_sec(info, ".data..percpu");

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
		if (!mod->percpu_size)
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);
			void *va = (void *)addr;

			if (va >= start && va < start + mod->percpu_size) {
				*can_addr = (unsigned long) (va - start);
				*can_addr += (unsigned long)
					per_cpu_ptr(mod->percpu,

/**
 * is_module_percpu_address() - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * Return: %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
	return __is_module_percpu_address(addr, NULL);

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)

static int percpu_modalloc(struct module *mod, struct load_info *info)
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)

static inline void percpu_modfree(struct module *mod)

static unsigned int find_pcpusec(struct load_info *info)

static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
	/* pcpusec should be 0, and size of that section should be 0. */

bool is_module_percpu_address(unsigned long addr)

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field) \
static void setup_modinfo_##field(struct module *mod, const char *s) \
	mod->field = kstrdup(s, GFP_KERNEL); \
static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
				    struct module_kobject *mk, char *buffer) \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
static int modinfo_##field##_exists(struct module *mod) \
	return mod->field != NULL; \
static void free_modinfo_##field(struct module *mod) \
static struct module_attribute modinfo_##field = { \
	.attr = { .name = __stringify(field), .mode = 0444 }, \
	.show = show_modinfo_##field, \
	.setup = setup_modinfo_##field, \
	.test = modinfo_##field##_exists, \
	.free = free_modinfo_##field, \

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
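
/*
 * Illustrative note: each MODINFO_ATTR(field) invocation above generates the
 * setup/show/test/free helpers plus a struct module_attribute named
 * modinfo_<field>, so the two lines above provide the "version" and
 * "srcversion" attributes that typically show up under /sys/module/<name>/.
 */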
	char name[MODULE_NAME_LEN + 1];
	char taints[MODULE_FLAGS_BUF_SIZE];
} last_unloaded_module;

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count by kmodule loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a)
	pr_debug("%s does not use %s!\n", a->name, b->name);

/*
 * - we add 'a' as a "source", 'b' as a "target" of module use
 * - the module_use is added to the list of 'b' sources (so
 *   'b' can walk the list to see who sourced them), and of 'a'
 *   targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);

/* Module a uses b: caller needs module_mutex() */
static int ref_module(struct module *a, struct module *b)
	if (b == NULL || already_uses(a, b))

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	err = add_module_usage(a, b);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		list_del(&use->source_list);
		list_del(&use->target_list);
	mutex_unlock(&module_mutex);

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
	int ret = (flags & O_TRUNC);

	add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);

static inline int try_force_unload(unsigned int flags)
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	/* Someone can put this right now, recover with checking */
	ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

static int try_stop_module(struct module *mod, int flags, int *forced)
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

/**
 * module_refcount() - return the refcount or -1 if unloading
 * @mod: the module we're checking
 *
 * -1 if the module is in the process of unloading
 * otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
EXPORT_SYMBOL(module_refcount);
/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
	char name[MODULE_NAME_LEN];
	char buf[MODULE_FLAGS_BUF_SIZE];

	if (!capable(CAP_SYS_MODULE) || modules_disabled)

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
	name[MODULE_NAME_LEN-1] = '\0';

	audit_log_kern_module(name);

	if (mutex_lock_interruptible(&module_mutex) != 0)

	mod = find_module(name);

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		/* This module can't be removed */

	ret = try_stop_module(mod, flags, &forced);

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);

	async_synchronize_full();

	/* Store the name and taints of the last unloaded module for diagnostic purposes */
	strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
	strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));

	/* someone could wait for the module in add_unformed_module() */
	wake_up_all(&module_wq);

	mutex_unlock(&module_mutex);

void __symbol_put(const char *symbol)
	struct find_symbol_arg fsa = {
	BUG_ON(!find_symbol(&fsa));
	module_put(fsa.owner);
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))

	/*
	 * Even though we hold a reference on the module; we still need to
	 * disable preemption in order to safely traverse the data structure.
	 */
	modaddr = __module_text_address(a);
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
	atomic_inc(&module->refcnt);
	trace_module_get(module, _RET_IP_);
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
	/* Note: here, we can fail to get a reference */
	if (likely(module_is_live(module) &&
		   atomic_inc_not_zero(&module->refcnt) != 0))
		trace_module_get(module, _RET_IP_);
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
	ret = atomic_dec_if_positive(&module->refcnt);
	WARN_ON(ret < 0);	/* Failed to put refcount */
	trace_module_put(module, _RET_IP_);
EXPORT_SYMBOL(module_put);
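
/*
 * Illustrative usage sketch (not part of this file): a typical caller pins
 * the owning module for as long as it uses one of its objects and drops the
 * reference afterwards. try_module_get() can fail while the module is still
 * initializing or already going away, so the result must be checked:
 *
 *	if (!try_module_get(owner))
 *		return -ENODEV;
 *	...use the owner's code or data...
 *	module_put(owner);
 */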
#else /* !CONFIG_MODULE_UNLOAD */
static inline void module_unload_free(struct module *mod)

static int ref_module(struct module *a, struct module *b)
	return strong_try_module_get(b);

static inline int module_unload_init(struct module *mod)
#endif /* CONFIG_MODULE_UNLOAD */

size_t module_flags_taint(unsigned long taints, char *buf)
	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
		if (taint_flags[i].module && test_bit(i, &taints))
			buf[l++] = taint_flags[i].c_true;

static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
	case MODULE_STATE_COMING:
	case MODULE_STATE_GOING:
	return sprintf(buffer, "%s\n", state);

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
	rc = kobject_synth_uevent(&mk->kobj, buffer, count);
	return rc ? rc : count;

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
	unsigned int size = mk->mod->mem[MOD_TEXT].size;

	if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) {
		for_class_mod_mem_type(type, core_data)
			size += mk->mod->mem[type].size;
	return sprintf(buffer, "%u\n", size);

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
static ssize_t show_datasize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
	unsigned int size = 0;

	for_class_mod_mem_type(type, core_data)
		size += mk->mod->mem[type].size;
	return sprintf(buffer, "%u\n", size);

static struct module_attribute modinfo_datasize =
	__ATTR(datasize, 0444, show_datasize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
	unsigned int size = 0;

	for_class_mod_mem_type(type, init)
		size += mk->mod->mem[type].size;
	return sprintf(buffer, "%u\n", size);

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
	l = module_flags_taint(mk->mod->taints, buffer);

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

struct module_attribute *modinfo_attrs[] = {
	&modinfo_srcversion,
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
#ifdef CONFIG_MODULE_UNLOAD

size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs);

static const char vermagic[] = VERMAGIC_STRING;

int try_to_force_load(struct module *mod, const char *reason)
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);

/* Parse tag=value strings from .modinfo section */
char *module_next_tag_pair(char *string, unsigned long *secsize)
	/* Skip non-zero chars */
		if ((*secsize)-- <= 1)

	/* Skip any zero padding. */
	while (!string[0]) {
		if ((*secsize)-- <= 1)

static char *get_next_modinfo(const struct load_info *info, const char *tag,
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	/*
	 * get_modinfo() calls made before rewrite_section_headers()
	 * must use sh_offset, as sh_addr isn't set!
	 */
	char *modinfo = (char *)info->hdr + infosec->sh_offset;

		size -= prev - modinfo;
		modinfo = module_next_tag_pair(prev, &size);

	for (p = modinfo; p; p = module_next_tag_pair(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;

static char *get_modinfo(const struct load_info *info, const char *tag)
	return get_next_modinfo(info, tag, NULL);
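
/*
 * Illustrative note: the .modinfo section parsed above is a sequence of
 * NUL-terminated "tag=value" strings, e.g. (layout sketch, values made up):
 *
 *	license=GPL\0author=Jane Doe\0vermagic=6.9.0 SMP mod_unload\0...
 *
 * get_modinfo(info, "license") would return a pointer to "GPL" inside that
 * buffer, and for_each_modinfo_entry() iterates over tags such as
 * "import_ns" that may appear more than once.
 */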
static int verify_namespace_is_imported(const struct load_info *info,
					const struct kernel_symbol *sym,
	const char *namespace;
	char *imported_namespace;

	namespace = kernel_symbol_namespace(sym);
	if (namespace && namespace[0]) {
		for_each_modinfo_entry(imported_namespace, info, "import_ns") {
			if (strcmp(namespace, imported_namespace) == 0)
#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
			"%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
			mod->name, kernel_symbol_name(sym), namespace);
#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS

static bool inherit_taint(struct module *mod, struct module *owner, const char *name)
	if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))

	if (mod->using_gplonly_symbols) {
		pr_err("%s: module using GPL-only symbols uses symbols %s from proprietary module %s.\n",
		       mod->name, name, owner->name);

	if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
		pr_warn("%s: module uses symbols %s from proprietary module %s, inheriting taint.\n",
			mod->name, name, owner->name);
		set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);

/* Resolve a symbol for this module. I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
	struct find_symbol_arg fsa = {
		.gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)),

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	if (!find_symbol(&fsa))

	if (fsa.license == GPL_ONLY)
		mod->using_gplonly_symbols = true;

	if (!inherit_taint(mod, fsa.owner, name)) {

	if (!check_version(info, name, mod, fsa.crc)) {
		fsa.sym = ERR_PTR(-EINVAL);

	err = verify_namespace_is_imported(info, fsa.sym, mod);
		fsa.sym = ERR_PTR(err);

	err = ref_module(mod, fsa.owner);
		fsa.sym = ERR_PTR(err);

	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);

	mutex_unlock(&module_mutex);

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
		pr_warn("%s: gave up waiting for init of module %s.\n",

void __weak module_arch_cleanup(struct module *mod)

void __weak module_arch_freeing_init(struct module *mod)

void *__module_writable_address(struct module *mod, void *loc)
	for_class_mod_mem_type(type, text) {
		struct module_memory *mem = &mod->mem[type];

		if (loc >= mem->base && loc < mem->base + mem->size)
			return loc + (mem->rw_copy - mem->base);

static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
	unsigned int size = PAGE_ALIGN(mod->mem[type].size);
	enum execmem_type execmem_type;

	mod->mem[type].size = size;

	if (mod_mem_type_is_data(type))
		execmem_type = EXECMEM_MODULE_DATA;
		execmem_type = EXECMEM_MODULE_TEXT;

	ptr = execmem_alloc(execmem_type, size);

	mod->mem[type].base = ptr;

	if (execmem_is_rox(execmem_type)) {
		ptr = vzalloc(size);
			execmem_free(mod->mem[type].base);

		mod->mem[type].rw_copy = ptr;
		mod->mem[type].is_rox = true;
		mod->mem[type].rw_copy = mod->mem[type].base;
		memset(mod->mem[type].base, 0, size);

	/*
	 * The pointer to these blocks of memory are stored on the module
	 * structure and we keep that around so long as the module is
	 * around. We only free that memory when we unload the module.
	 * Just mark them as not being a leak then. The .init* ELF
	 * sections *do* get freed after boot so we *could* treat them
	 * slightly differently with kmemleak_ignore() and only grey
	 * them out as they work as typical memory allocations which
	 * *do* eventually get freed, but let's just keep things simple
	 * and avoid *any* false positives.
	 */
	kmemleak_not_leak(ptr);

static void module_memory_free(struct module *mod, enum mod_mem_type type)
	struct module_memory *mem = &mod->mem[type];

		vfree(mem->rw_copy);

	execmem_free(mem->base);

static void free_mod_mem(struct module *mod)
	for_each_mod_mem_type(type) {
		struct module_memory *mod_mem = &mod->mem[type];

		if (type == MOD_DATA)

		/* Free lock-classes; relies on the preceding sync_rcu(). */
		lockdep_free_key_range(mod_mem->base, mod_mem->size);
		module_memory_free(mod, type);

	/* MOD_DATA hosts mod, so free it at last */
	lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
	module_memory_free(mod, MOD_DATA);

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
	trace_module_free(mod);

	codetag_unload_module(mod);

	mod_sysfs_teardown(mod);

	/*
	 * We leave it in list to prevent duplicate loads, but make sure
	 * that noone uses it while it's being deconstructed.
	 */
	mutex_lock(&module_mutex);
	mod->state = MODULE_STATE_UNFORMED;
	mutex_unlock(&module_mutex);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	if (is_livepatch_module(mod))
		free_module_elf(mod);

	/* Now we can delete it from the lists */
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	/* Remove this module from bug list, this uses list_del_rcu */
	module_bug_cleanup(mod);
	/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
	if (try_add_tainted_module(mod))
		pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n",
	mutex_unlock(&module_mutex);

	/* This may be empty, but that's OK */
	module_arch_freeing_init(mod);
	percpu_modfree(mod);

void *__symbol_get(const char *symbol)
	struct find_symbol_arg fsa = {
	if (!find_symbol(&fsa))
	if (fsa.license != GPL_ONLY) {
		pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
	if (strong_try_module_get(fsa.owner))
	return (void *)kernel_symbol_value(fsa.sym);
EXPORT_SYMBOL_GPL(__symbol_get);
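
/*
 * Illustrative usage sketch (not part of this file): callers normally go
 * through the symbol_get()/symbol_put() helpers from <linux/module.h>, which
 * wrap __symbol_get()/__symbol_put() and take a reference on the exporting
 * module. The symbol named below is hypothetical.
 *
 *	if (symbol_get(some_gpl_exported_func)) {
 *		some_gpl_exported_func();
 *		symbol_put(some_gpl_exported_func);
 *	}
 */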
/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_exported_symbols(struct module *mod)
	const struct kernel_symbol *s;
		const struct kernel_symbol *sym;
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			struct find_symbol_arg fsa = {
				.name = kernel_symbol_name(s),
			if (find_symbol(&fsa)) {
				pr_err("%s: exports duplicate symbol %s"
				       mod->name, kernel_symbol_name(s),
				       module_name(fsa.owner));

static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
	/*
	 * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
	 * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
	 * i386 has a similar problem but may not deserve a fix.
	 *
	 * If we ever have to ignore many symbols, consider refactoring the code to
	 * only warn if referenced by a relocation.
	 */
	if (emachine == EM_386 || emachine == EM_X86_64)
		return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");

/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	const struct kernel_symbol *ksym;

	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		switch (sym[i].st_shndx) {
			/* Ignore common symbols */
			if (!strncmp(name, "__gnu_lto", 9))

			/*
			 * We compiled with -fno-common. These are not
			 * supposed to happen.
			 */
			pr_debug("Common symbol: %s\n", name);
			pr_warn("%s: please compile with -fno-common\n",

			/* Don't need to do anything */
			pr_debug("Absolute symbol: 0x%08lx %s\n",
				 (long)sym[i].st_value, name);

			/* Livepatch symbols are resolved by livepatch */
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved. */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = kernel_symbol_value(ksym);

			/* Ok if weak or ignored. */
			    (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
			     ignore_undef_symbol(info->hdr->e_machine, name)))

			ret = PTR_ERR(ksym) ?: -ENOENT;
			pr_warn("%s: Unknown symbol %s (err %d)\n",
				mod->name, name, ret);

			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;

static int apply_relocations(struct module *mod, const struct load_info *info)
	/* Now do relocations. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		unsigned int infosec = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (infosec >= info->hdr->e_shnum)

		/* Don't bother with non-allocated sections */
		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))

		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
			err = klp_apply_section_relocs(mod, info->sechdrs,
		else if (info->sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(info->sechdrs, info->strtab,
						 info->index.sym, i, mod);

/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
	/* default implementation just returns zero */

long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
				Elf_Shdr *sechdr, unsigned int section)
	long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT;

	mod->mem[type].size += arch_mod_section_prepend(mod, section);
	offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1);
	mod->mem[type].size = offset + sechdr->sh_size;

	WARN_ON_ONCE(offset & mask);
	return offset | mask;
bool module_init_layout_section(const char *sname)
#ifndef CONFIG_MODULE_UNLOAD
	if (module_exit_section(sname))
	return module_init_section(sname);

static void __layout_sections(struct module *mod, struct load_info *info, bool is_init)
	static const unsigned long masks[][2] = {
		/*
		 * NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below
		 */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	static const int core_m_to_mem_type[] = {
	static const int init_m_to_mem_type[] = {

	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		enum mod_mem_type type = is_init ? init_m_to_mem_type[m] : core_m_to_mem_type[m];

		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || is_init != module_init_layout_section(sname))

			if (WARN_ON_ONCE(type == MOD_INVALID))

			/*
			 * Do not allocate codetag memory as we load it into
			 * preallocated contiguous memory.
			 */
			if (codetag_needs_module_section(mod, sname, s->sh_size)) {
				/*
				 * s->sh_entsize won't be used but populate the
				 * type field to avoid confusion.
				 */
				s->sh_entsize = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK)
						<< SH_ENTSIZE_TYPE_SHIFT;

			s->sh_entsize = module_get_offset_and_type(mod, type, s, i);
			pr_debug("\t%s\n", sname);

/*
 * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
 * might -- code, read-only data, read-write data, small data. Tally
 * sizes, and place the offsets into sh_entsize fields: high bit means it
 */
static void layout_sections(struct module *mod, struct load_info *info)
	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	pr_debug("Core section allocation order for %s:\n", mod->name);
	__layout_sections(mod, info, false);

	pr_debug("Init section allocation order for %s:\n", mod->name);
	__layout_sections(mod, info, true);

static void module_license_taint_check(struct module *mod, const char *license)
		license = "unspecified";

	if (!license_is_gpl_compatible(license)) {
		if (!test_taint(TAINT_PROPRIETARY_MODULE))
			pr_warn("%s: module license '%s' taints kernel.\n",
				mod->name, license);
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

static void setup_modinfo(struct module *mod, struct load_info *info)
	struct module_attribute *attr;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
			attr->setup(mod, get_modinfo(info, attr->attr.name));

static void free_modinfo(struct module *mod)
	struct module_attribute *attr;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {

bool __weak module_init_section(const char *name)
	return strstarts(name, ".init");

bool __weak module_exit_section(const char *name)
	return strstarts(name, ".exit");

static int validate_section_offset(const struct load_info *info, Elf_Shdr *shdr)
#if defined(CONFIG_64BIT)
	unsigned long long secend;
	unsigned long secend;

	/*
	 * Check for both overflow and offset/size being
	 */
	secend = shdr->sh_offset + shdr->sh_size;
	if (secend < shdr->sh_offset || secend > info->len)

/**
 * elf_validity_ehdr() - Checks an ELF header for module validity
 * @info: Load info containing the ELF header to check
 *
 * Checks whether an ELF header could belong to a valid module. Checks:
 *
 * * ELF header is within the data the user provided
 * * ELF magic is present
 * * It is relocatable (not final linked, not core file, etc.)
 * * The header's machine type matches what the architecture expects.
 * * Optional arch-specific hook for other properties
 *   - module_elf_check_arch() is currently only used by PPC to check
 *     ELF ABI version, but may be used by others in the future.
 *
 * Return: %0 if valid, %-ENOEXEC on failure.
 */
static int elf_validity_ehdr(const struct load_info *info)
	if (info->len < sizeof(*(info->hdr))) {
		pr_err("Invalid ELF header len %lu\n", info->len);
	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) {
		pr_err("Invalid ELF header magic: != %s\n", ELFMAG);
	if (info->hdr->e_type != ET_REL) {
		pr_err("Invalid ELF header type: %u != %u\n",
		       info->hdr->e_type, ET_REL);
	if (!elf_check_arch(info->hdr)) {
		pr_err("Invalid architecture in ELF header: %u\n",
		       info->hdr->e_machine);
	if (!module_elf_check_arch(info->hdr)) {
		pr_err("Invalid module architecture in ELF header: %u\n",
		       info->hdr->e_machine);

/**
 * elf_validity_cache_sechdrs() - Cache section headers if valid
 * @info: Load info to compute section headers from
 *
 * * ELF header is valid (see elf_validity_ehdr())
 * * Section headers are the size we expect
 * * Section array fits in the user provided data
 * * Section index 0 is NULL
 * * Section contents are inbounds
 *
 * Then updates @info with a &load_info->sechdrs pointer if valid.
 *
 * Return: %0 if valid, negative error code if validation failed.
 */
static int elf_validity_cache_sechdrs(struct load_info *info)
	err = elf_validity_ehdr(info);

	if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) {
		pr_err("Invalid ELF section header size\n");

	/*
	 * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
	 * known and small. So e_shnum * sizeof(Elf_Shdr)
	 * will not overflow unsigned long on any platform.
	 */
	if (info->hdr->e_shoff >= info->len
	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
		info->len - info->hdr->e_shoff)) {
		pr_err("Invalid ELF section header overflow\n");

	sechdrs = (void *)info->hdr + info->hdr->e_shoff;

	/*
	 * The code assumes that section 0 has a length of zero and
	 * an addr of zero, so check for it.
	 */
	if (sechdrs[0].sh_type != SHT_NULL
	    || sechdrs[0].sh_size != 0
	    || sechdrs[0].sh_addr != 0) {
		pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n",
		       sechdrs[0].sh_type);

	/* Validate contents are inbounds */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		switch (shdr->sh_type) {
			/* No contents, offset/size don't mean anything */
			err = validate_section_offset(info, shdr);
				pr_err("Invalid ELF section in module (section %u type %u)\n",

	info->sechdrs = sechdrs;

/**
 * elf_validity_cache_secstrings() - Caches section names if valid
 * @info: Load info to cache section names from. Must have valid sechdrs.
 *
 * Specifically checks:
 *
 * * Section name table index is inbounds of section headers
 * * Section name table is not empty
 * * Section name table is NUL terminated
 * * All section name offsets are inbounds of the section
 *
 * Then updates @info with a &load_info->secstrings pointer if valid.
 *
 * Return: %0 if valid, negative error code if validation failed.
 */
static int elf_validity_cache_secstrings(struct load_info *info)
	Elf_Shdr *strhdr, *shdr;

	/*
	 * Verify if the section name table index is valid.
	 */
	if (info->hdr->e_shstrndx == SHN_UNDEF
	    || info->hdr->e_shstrndx >= info->hdr->e_shnum) {
		pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n",
		       info->hdr->e_shstrndx, info->hdr->e_shstrndx,
		       info->hdr->e_shnum);

	strhdr = &info->sechdrs[info->hdr->e_shstrndx];

	/*
	 * The section name table must be NUL-terminated, as required
	 * by the spec. This makes strcmp and pr_* calls that access
	 * strings in the section safe.
	 */
	secstrings = (void *)info->hdr + strhdr->sh_offset;
	if (strhdr->sh_size == 0) {
		pr_err("empty section name table\n");
	if (secstrings[strhdr->sh_size - 1] != '\0') {
		pr_err("ELF Spec violation: section name table isn't null terminated\n");

	for (i = 0; i < info->hdr->e_shnum; i++) {
		shdr = &info->sechdrs[i];
		/* SHT_NULL means sh_name has an undefined value */
		if (shdr->sh_type == SHT_NULL)
		if (shdr->sh_name >= strhdr->sh_size) {
			pr_err("Invalid ELF section name in module (section %u type %u)\n",

	info->secstrings = secstrings;

/**
 * elf_validity_cache_index_info() - Validate and cache modinfo section
 * @info: Load info to populate the modinfo index on.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated
 *
 * Checks that if there is a .modinfo section, it is unique.
 * Then, it caches its index in &load_info->index.info.
 * Finally, it tries to populate the name to improve error messages.
 *
 * Return: %0 if valid, %-ENOEXEC if multiple modinfo sections were found.
 */
static int elf_validity_cache_index_info(struct load_info *info)
	info_idx = find_any_unique_sec(info, ".modinfo");

		/* Early return, no .modinfo */

		pr_err("Only one .modinfo section must exist.\n");

	info->index.info = info_idx;
	/* Try to find a name early so we can log errors with a module name */
	info->name = get_modinfo(info, "name");
/**
 * elf_validity_cache_index_mod() - Validates and caches this_module section
 * @info: Load info to cache this_module on.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated
 *
 * The ".gnu.linkonce.this_module" ELF section is special. It is what modpost
 * uses to refer to __this_module, and it lets us rely on THIS_MODULE to point
 * to &__this_module properly. The kernel's modpost declares it in each
 * module's *.mod.c file. If the struct module of the kernel changes, a full
 * kernel rebuild is required.
 *
 * We have a few expectations for this special section, and this function
 * validates all of them for us:
 *
 * * The section has contents
 * * The section is unique
 * * We expect the kernel to always have to allocate it: SHF_ALLOC
 * * The section size must match the kernel's run time's struct module
 *
 * If all checks pass, the index will be cached in &load_info->index.mod
 *
 * Return: %0 on validation success, %-ENOEXEC on failure
 */
static int elf_validity_cache_index_mod(struct load_info *info)
	mod_idx = find_any_unique_sec(info, ".gnu.linkonce.this_module");
		pr_err("module %s: Exactly one .gnu.linkonce.this_module section must exist.\n",
		       info->name ?: "(missing .modinfo section or name field)");

	shdr = &info->sechdrs[mod_idx];

	if (shdr->sh_type == SHT_NOBITS) {
		pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n",
		       info->name ?: "(missing .modinfo section or name field)");

	if (!(shdr->sh_flags & SHF_ALLOC)) {
		pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n",
		       info->name ?: "(missing .modinfo section or name field)");

	if (shdr->sh_size != sizeof(struct module)) {
		pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n",
		       info->name ?: "(missing .modinfo section or name field)");

	info->index.mod = mod_idx;

/**
 * elf_validity_cache_index_sym() - Validate and cache symtab index
 * @info: Load info to cache symtab index in.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated.
 *
 * Checks that there is exactly one symbol table, then caches its index in
 * &load_info->index.sym.
 *
 * Return: %0 if valid, %-ENOEXEC on failure.
 */
static int elf_validity_cache_index_sym(struct load_info *info)
	unsigned int sym_idx;
	unsigned int num_sym_secs = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {

	if (num_sym_secs != 1) {
		pr_warn("%s: module has no symbols (stripped?)\n",
			info->name ?: "(missing .modinfo section or name field)");

	info->index.sym = sym_idx;

/**
 * elf_validity_cache_index_str() - Validate and cache strtab index
 * @info: Load info to cache strtab index in.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated.
 *        Must have &load_info->index.sym populated.
 *
 * Looks at the symbol table's associated string table, makes sure it is
 * in-bounds, and caches it.
 *
 * Return: %0 if valid, %-ENOEXEC on failure.
 */
static int elf_validity_cache_index_str(struct load_info *info)
	unsigned int str_idx = info->sechdrs[info->index.sym].sh_link;

	if (str_idx == SHN_UNDEF || str_idx >= info->hdr->e_shnum) {
		pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n",
		       str_idx, str_idx, info->hdr->e_shnum);

	info->index.str = str_idx;

/**
 * elf_validity_cache_index() - Resolve, validate, cache section indices
 * @info: Load info to read from and update.
 *        &load_info->sechdrs and &load_info->secstrings must be populated.
 * @flags: Load flags, relevant to suppress version loading, see
 *         uapi/linux/module.h
 *
 * Populates &load_info->index, validating as it goes.
 * See child functions for per-field validation:
 *
 * * elf_validity_cache_index_info()
 * * elf_validity_cache_index_mod()
 * * elf_validity_cache_index_sym()
 * * elf_validity_cache_index_str()
 *
 * If versioning is not suppressed via flags, load the version index from
 * a section called "__versions" with no validation.
 *
 * If CONFIG_SMP is enabled, load the percpu section by name with no
 *
 * Return: 0 on success, negative error code if an index failed validation.
 */
static int elf_validity_cache_index(struct load_info *info, int flags)
	err = elf_validity_cache_index_info(info);
	err = elf_validity_cache_index_mod(info);
	err = elf_validity_cache_index_sym(info);
	err = elf_validity_cache_index_str(info);

	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
		info->index.vers = 0; /* Pretend no __versions section! */
		info->index.vers = find_sec(info, "__versions");

	info->index.pcpu = find_pcpusec(info);
/**
 * elf_validity_cache_strtab() - Validate and cache symbol string table
 * @info: Load info to read from and update.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated.
 *        Must have &load_info->index populated.
 *
 * * The string table is not empty.
 * * The string table starts and ends with NUL (required by ELF spec).
 * * Every &Elf_Sym->st_name offset in the symbol table is inbounds of the
 *
 * And caches the pointer as &load_info->strtab in @info.
 *
 * Return: 0 on success, negative error code if a check failed.
 */
static int elf_validity_cache_strtab(struct load_info *info)
	Elf_Shdr *str_shdr = &info->sechdrs[info->index.str];
	Elf_Shdr *sym_shdr = &info->sechdrs[info->index.sym];
	char *strtab = (char *)info->hdr + str_shdr->sh_offset;
	Elf_Sym *syms = (void *)info->hdr + sym_shdr->sh_offset;

	if (str_shdr->sh_size == 0) {
		pr_err("empty symbol string table\n");
	if (strtab[0] != '\0') {
		pr_err("symbol string table missing leading NUL\n");
	if (strtab[str_shdr->sh_size - 1] != '\0') {
		pr_err("symbol string table isn't NUL terminated\n");

	/*
	 * Now that we know strtab is correctly structured, check symbol
	 * starts are inbounds before they're used later.
	 */
	for (i = 0; i < sym_shdr->sh_size / sizeof(*syms); i++) {
		if (syms[i].st_name >= str_shdr->sh_size) {
			pr_err("symbol name out of bounds in string table");

	info->strtab = strtab;

/*
 * Check userspace passed ELF module against our expectations, and cache
 * useful variables for further processing as we go.
 *
 * This does basic validity checks against section offsets and sizes, the
 * section name string table, and the indices used for it (sh_name).
 *
 * As a last step, since we're already checking the ELF sections we cache
 * useful variables which will be used later for our convenience:
 *
 *	o pointers to section headers
 *	o cache the modinfo symbol section
 *	o cache the string symbol section
 *	o cache the module section
 *
 * As a last step we set info->mod to the temporary copy of the module in
 * info->hdr. The final one will be allocated in move_module(). Any
 * modifications we make to our copy of the module will be carried over
 * to the final minted module.
 */
static int elf_validity_cache_copy(struct load_info *info, int flags)
	err = elf_validity_cache_sechdrs(info);
	err = elf_validity_cache_secstrings(info);
	err = elf_validity_cache_index(info, flags);
	err = elf_validity_cache_strtab(info);

	/* This is temporary: point mod into copy of data. */
	info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;

	/*
	 * If we didn't load the .modinfo 'name' field earlier, fall back to
	 * on-disk struct mod 'name' field.
	 */
		info->name = info->mod->name;

#define COPY_CHUNK_SIZE (16*PAGE_SIZE)

static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
		unsigned long n = min(len, COPY_CHUNK_SIZE);

		if (copy_from_user(dst, usrc, n) != 0)

static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
	if (!get_modinfo(info, "livepatch"))
		/* Nothing more to do */

	if (set_livepatch_module(mod))

	pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",

static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
	if (retpoline_module_ok(get_modinfo(info, "retpoline")))

	pr_warn("%s: loading module not compiled with retpoline compiler.\n",

/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
				 struct load_info *info)
	if (info->len < sizeof(*(info->hdr)))

	err = security_kernel_load_data(LOADING_MODULE, true);

	/* Suck in entire file: we'll want most of it. */
	info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);

	if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {

	err = security_kernel_post_load_data((char *)info->hdr, info->len,
					     LOADING_MODULE, "init_module");

static void free_copy(struct load_info *info, int flags)
	if (flags & MODULE_INIT_COMPRESSED_FILE)
		module_decompress_cleanup(info);

static int rewrite_section_headers(struct load_info *info, int flags)
	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];

		/*
		 * Mark all sections sh_addr with their address in the
		 */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

	/* Track but don't keep modinfo and version sections. */
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
/* These calls taint the kernel depending on certain module circumstances. */
2337 static void module_augment_kernel_taints(struct module
*mod
, struct load_info
*info
)
2339 int prev_taint
= test_taint(TAINT_PROPRIETARY_MODULE
);
2341 if (!get_modinfo(info
, "intree")) {
2342 if (!test_taint(TAINT_OOT_MODULE
))
2343 pr_warn("%s: loading out-of-tree module taints kernel.\n",
2345 add_taint_module(mod
, TAINT_OOT_MODULE
, LOCKDEP_STILL_OK
);
2348 check_modinfo_retpoline(mod
, info
);
2350 if (get_modinfo(info
, "staging")) {
2351 add_taint_module(mod
, TAINT_CRAP
, LOCKDEP_STILL_OK
);
2352 pr_warn("%s: module is from the staging directory, the quality "
2353 "is unknown, you have been warned.\n", mod
->name
);
2356 if (is_livepatch_module(mod
)) {
2357 add_taint_module(mod
, TAINT_LIVEPATCH
, LOCKDEP_STILL_OK
);
2358 pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
2362 module_license_taint_check(mod
, get_modinfo(info
, "license"));
2364 if (get_modinfo(info
, "test")) {
2365 if (!test_taint(TAINT_TEST
))
2366 pr_warn("%s: loading test module taints kernel.\n",
2368 add_taint_module(mod
, TAINT_TEST
, LOCKDEP_STILL_OK
);
2370 #ifdef CONFIG_MODULE_SIG
2371 mod
->sig_ok
= info
->sig_ok
;
2373 pr_notice_once("%s: module verification failed: signature "
2374 "and/or required key missing - tainting "
2375 "kernel\n", mod
->name
);
2376 add_taint_module(mod
, TAINT_UNSIGNED_MODULE
, LOCKDEP_STILL_OK
);
2381 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2382 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2383 * using GPL-only symbols it needs.
2385 if (strcmp(mod
->name
, "ndiswrapper") == 0)
2386 add_taint(TAINT_PROPRIETARY_MODULE
, LOCKDEP_NOW_UNRELIABLE
);
2388 /* driverloader was caught wrongly pretending to be under GPL */
2389 if (strcmp(mod
->name
, "driverloader") == 0)
2390 add_taint_module(mod
, TAINT_PROPRIETARY_MODULE
,
2391 LOCKDEP_NOW_UNRELIABLE
);
2393 /* lve claims to be GPL but upstream won't provide source */
2394 if (strcmp(mod
->name
, "lve") == 0)
2395 add_taint_module(mod
, TAINT_PROPRIETARY_MODULE
,
2396 LOCKDEP_NOW_UNRELIABLE
);
2398 if (!prev_taint
&& test_taint(TAINT_PROPRIETARY_MODULE
))
2399 pr_warn("%s: module license taints kernel.\n", mod
->name
);
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
	const char *modmagic = get_modinfo(info, "vermagic");
	int err;

	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
		modmagic = NULL;

	/* This is allowed: modprobe --force will invalidate it. */
	if (!modmagic) {
		err = try_to_force_load(mod, "bad vermagic");
		if (err)
			return err;
	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
		pr_err("%s: version magic '%s' should be '%s'\n",
		       info->name, modmagic, vermagic);
		return -ENOEXEC;
	}

	err = check_modinfo_livepatch(mod, info);
	if (err)
		return err;

	return 0;
}
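/*
 * A vermagic mismatch is normally fatal. It is only bypassed when userspace
 * asked for MODULE_INIT_IGNORE_VERMAGIC (e.g. modprobe --force), in which
 * case try_to_force_load() decides whether a forced, tainted load is
 * permitted instead of the module being refused outright.
 */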
static int find_module_sections(struct module *mod, struct load_info *info)
{
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(info, "__kcrctab");
	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
				     sizeof(*mod->gpl_syms),
				     &mod->num_gpl_syms);
	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");

#ifdef CONFIG_CONSTRUCTORS
	mod->ctors = section_objs(info, ".ctors",
				  sizeof(*mod->ctors), &mod->num_ctors);
	if (!mod->ctors)
		mod->ctors = section_objs(info, ".init_array",
					  sizeof(*mod->ctors), &mod->num_ctors);
	else if (find_sec(info, ".init_array")) {
		/*
		 * This shouldn't happen with same compiler and binutils
		 * building all parts of the module.
		 */
		pr_warn("%s: has both .ctors and .init_array.\n",
			mod->name);
		return -EINVAL;
	}
#endif

	mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1,
					       &mod->noinstr_text_size);

#ifdef CONFIG_TRACEPOINTS
	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
					     sizeof(*mod->tracepoints_ptrs),
					     &mod->num_tracepoints);
#endif
#ifdef CONFIG_TREE_SRCU
	mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
					     sizeof(*mod->srcu_struct_ptrs),
					     &mod->num_srcu_structs);
#endif
#ifdef CONFIG_BPF_EVENTS
	mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
					   sizeof(*mod->bpf_raw_events),
					   &mod->num_bpf_raw_events);
#endif
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
	mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size);
	mod->btf_base_data = any_section_objs(info, ".BTF.base", 1,
					      &mod->btf_base_data_size);
#endif
#ifdef CONFIG_JUMP_LABEL
	mod->jump_entries = section_objs(info, "__jump_table",
					 sizeof(*mod->jump_entries),
					 &mod->num_jump_entries);
#endif
#ifdef CONFIG_EVENT_TRACING
	mod->trace_events = section_objs(info, "_ftrace_events",
					 sizeof(*mod->trace_events),
					 &mod->num_trace_events);
	mod->trace_evals = section_objs(info, "_ftrace_eval_map",
					sizeof(*mod->trace_evals),
					&mod->num_trace_evals);
#endif
#ifdef CONFIG_TRACING
	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
						    sizeof(*mod->trace_bprintk_fmt_start),
						    &mod->num_trace_bprintk_fmt);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
	/* sechdrs[0].sh_size is always zero */
	mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
					     sizeof(*mod->ftrace_callsites),
					     &mod->num_ftrace_callsites);
#endif
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
	mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
				     sizeof(*mod->ei_funcs),
				     &mod->num_ei_funcs);
#endif
#ifdef CONFIG_KPROBES
	mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1,
					       &mod->kprobes_text_size);
	mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
					     sizeof(unsigned long),
					     &mod->num_kprobe_blacklist);
#endif
#ifdef CONFIG_PRINTK_INDEX
	mod->printk_index_start = section_objs(info, ".printk_index",
					       sizeof(*mod->printk_index_start),
					       &mod->printk_index_size);
#endif
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
	mod->static_call_sites = section_objs(info, ".static_call_sites",
					      sizeof(*mod->static_call_sites),
					      &mod->num_static_call_sites);
#endif
#if IS_ENABLED(CONFIG_KUNIT)
	mod->kunit_suites = section_objs(info, ".kunit_test_suites",
					 sizeof(*mod->kunit_suites),
					 &mod->num_kunit_suites);
	mod->kunit_init_suites = section_objs(info, ".kunit_init_test_suites",
					      sizeof(*mod->kunit_init_suites),
					      &mod->num_kunit_init_suites);
#endif

	mod->extable = section_objs(info, "__ex_table",
				    sizeof(*mod->extable), &mod->num_exentries);

	if (section_addr(info, "__obsparm"))
		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);

#ifdef CONFIG_DYNAMIC_DEBUG_CORE
	mod->dyndbg_info.descs = section_objs(info, "__dyndbg",
					      sizeof(*mod->dyndbg_info.descs),
					      &mod->dyndbg_info.num_descs);
	mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes",
						sizeof(*mod->dyndbg_info.classes),
						&mod->dyndbg_info.num_classes);
#endif

	return 0;
}
static int move_module(struct module *mod, struct load_info *info)
{
	int i;
	enum mod_mem_type t = 0;
	int ret = -ENOMEM;
	bool codetag_section_found = false;

	for_each_mod_mem_type(type) {
		if (!mod->mem[type].size) {
			mod->mem[type].base = NULL;
			mod->mem[type].rw_copy = NULL;
			continue;
		}

		ret = module_memory_alloc(mod, type);
		if (ret) {
			t = type;
			goto out_err;
		}
	}

	/* Transfer each section which specifies SHF_ALLOC */
	pr_debug("Final section addresses for %s:\n", mod->name);
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		Elf_Shdr *shdr = &info->sechdrs[i];
		const char *sname;
		unsigned long addr;

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;

		sname = info->secstrings + shdr->sh_name;
		/*
		 * Load codetag sections separately as they might still be used
		 * after module unload.
		 */
		if (codetag_needs_module_section(mod, sname, shdr->sh_size)) {
			dest = codetag_alloc_module_section(mod, sname, shdr->sh_size,
					arch_mod_section_prepend(mod, i), shdr->sh_addralign);
			if (WARN_ON(!dest)) {
				ret = -EINVAL;
				goto out_err;
			}
			if (IS_ERR(dest)) {
				ret = PTR_ERR(dest);
				goto out_err;
			}
			addr = (unsigned long)dest;
			codetag_section_found = true;
		} else {
			enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT;
			unsigned long offset = shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK;

			addr = (unsigned long)mod->mem[type].base + offset;
			dest = mod->mem[type].rw_copy + offset;
		}

		if (shdr->sh_type != SHT_NOBITS) {
			/*
			 * Our ELF checker already validated this, but let's
			 * be pedantic and make the goal clearer. We actually
			 * end up copying over all modifications made to the
			 * userspace copy of the entire struct module.
			 */
			if (i == info->index.mod &&
			    (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) {
				ret = -ENOEXEC;
				goto out_err;
			}
			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
		}

		/*
		 * Update the userspace copy's ELF section address to point to
		 * our newly allocated memory as a pure convenience so that
		 * users of info can keep taking advantage and using the newly
		 * minted official memory area.
		 */
		shdr->sh_addr = addr;
		pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr,
			 (long)shdr->sh_size, info->secstrings + shdr->sh_name);
	}

	return 0;
out_err:
	for (t--; t >= 0; t--)
		module_memory_free(mod, t);
	if (codetag_section_found)
		codetag_free_module_sections(mod);

	return ret;
}
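/*
 * For sections that are not diverted to codetag storage, layout_sections()
 * has already encoded the destination in sh_entsize: the mod_mem_type sits in
 * the bits above SH_ENTSIZE_TYPE_SHIFT and the byte offset within that region
 * in SH_ENTSIZE_OFFSET_MASK. The decode in move_module() relies on exactly
 * that packing.
 */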
static int check_export_symbol_versions(struct module *mod)
{
#ifdef CONFIG_MODVERSIONS
	if ((mod->num_syms && !mod->crcs) ||
	    (mod->num_gpl_syms && !mod->gpl_crcs)) {
		return try_to_force_load(mod,
					 "no versions for exported symbols");
	}
#endif
	return 0;
}
static void flush_module_icache(const struct module *mod)
{
	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	for_each_mod_mem_type(type) {
		const struct module_memory *mod_mem = &mod->mem[type];

		if (mod_mem->size) {
			flush_icache_range((unsigned long)mod_mem->base,
					   (unsigned long)mod_mem->base + mod_mem->size);
		}
	}
}
bool __weak module_elf_check_arch(Elf_Ehdr *hdr)
{
	return true;
}

int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
				     Elf_Shdr *sechdrs,
				     char *secstrings,
				     struct module *mod)
{
	return 0;
}
/* module_blacklist is a comma-separated list of module names */
static char *module_blacklist;
static bool blacklisted(const char *module_name)
{
	const char *p;
	size_t len;

	if (!module_blacklist)
		return false;

	for (p = module_blacklist; *p; p += len) {
		len = strcspn(p, ",");
		if (strlen(module_name) == len && !memcmp(module_name, p, len))
			return true;
		if (p[len] == ',')
			len++;
	}
	return false;
}
core_param(module_blacklist, module_blacklist, charp, 0400);
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
	struct module *mod;
	unsigned int ndx;
	int err;

	/* Allow arches to frob section contents and sizes. */
	err = module_frob_arch_sections(info->hdr, info->sechdrs,
					info->secstrings, info->mod);
	if (err < 0)
		return ERR_PTR(err);

	err = module_enforce_rwx_sections(info->hdr, info->sechdrs,
					  info->secstrings, info->mod);
	if (err < 0)
		return ERR_PTR(err);

	/* We will do a special allocation for per-cpu sections later. */
	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;

	/*
	 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
	 * layout_sections() can put it in the right place.
	 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
	 */
	ndx = find_sec(info, ".data..ro_after_init");
	if (ndx)
		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
	/*
	 * Mark the __jump_table section as ro_after_init as well: these data
	 * structures are never modified, with the exception of entries that
	 * refer to code in the __init section, which are annotated as such
	 * at module load time.
	 */
	ndx = find_sec(info, "__jump_table");
	if (ndx)
		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;

	/*
	 * Determine total sizes, and put offsets in sh_entsize. For now
	 * this is done generically; there doesn't appear to be any
	 * special cases for the architectures.
	 */
	layout_sections(info->mod, info);
	layout_symtab(info->mod, info);

	/* Allocate and move to the final place */
	err = move_module(info->mod, info);
	if (err)
		return ERR_PTR(err);

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	codetag_module_replaced(info->mod, mod);

	return mod;
}
/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	percpu_modfree(mod);
	module_arch_freeing_init(mod);

	free_mod_mem(mod);
}
int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}

int __weak module_post_finalize(const Elf_Ehdr *hdr,
				const Elf_Shdr *sechdrs,
				struct module *me)
{
	return 0;
}
static int post_relocation(struct module *mod, const struct load_info *info)
{
	int ret;

	/* Sort exception table now relocations are done. */
	sort_extable(mod->extable, mod->extable + mod->num_exentries);

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	ret = module_finalize(info->hdr, info->sechdrs, mod);
	if (ret)
		return ret;

	for_each_mod_mem_type(type) {
		struct module_memory *mem = &mod->mem[type];

		if (mem->is_rox) {
			if (!execmem_update_copy(mem->base, mem->rw_copy,
						 mem->size))
				return -ENOMEM;

			vfree(mem->rw_copy);
			mem->rw_copy = NULL;
		}
	}

	return module_post_finalize(info->hdr, info->sechdrs, mod);
}
/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long i;

	for (i = 0; i < mod->num_ctors; i++)
		mod->ctors[i]();
#endif
}
/* For freeing module_init on success, in case kallsyms traversing */
struct mod_initfree {
	struct llist_node node;
	void *init_text;
	void *init_data;
	void *init_rodata;
};

static void do_free_init(struct work_struct *w)
{
	struct llist_node *pos, *n, *list;
	struct mod_initfree *initfree;

	list = llist_del_all(&init_free_list);

	synchronize_rcu();

	llist_for_each_safe(pos, n, list) {
		initfree = container_of(pos, struct mod_initfree, node);
		execmem_free(initfree->init_text);
		execmem_free(initfree->init_data);
		execmem_free(initfree->init_rodata);
		kfree(initfree);
	}
}

void flush_module_init_free_work(void)
{
	flush_work(&init_free_wq);
}
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."
/* Default value for module->async_probe_requested */
static bool async_probe;
module_param(async_probe, bool, 0644);
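/*
 * Example: "module.async_probe=1" on the kernel command line flips this
 * default for every module; a per-module "async_probe" argument passed via
 * init_module()/finit_module() is picked up by unknown_module_param_cb()
 * below and overrides the default for that one module.
 */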
/*
 * This is where the real work happens.
 *
 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
 * helper command 'lx-symbols'.
 */
static noinline int do_init_module(struct module *mod)
{
	int ret = 0;
	struct mod_initfree *freeinit;
#if defined(CONFIG_MODULE_STATS)
	unsigned int text_size = 0, total_size = 0;

	for_each_mod_mem_type(type) {
		const struct module_memory *mod_mem = &mod->mem[type];
		if (mod_mem->size) {
			total_size += mod_mem->size;
			if (type == MOD_TEXT || type == MOD_INIT_TEXT)
				text_size += mod_mem->size;
		}
	}
#endif

	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
	if (!freeinit) {
		ret = -ENOMEM;
		goto fail;
	}
	freeinit->init_text = mod->mem[MOD_INIT_TEXT].base;
	freeinit->init_data = mod->mem[MOD_INIT_DATA].base;
	freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base;

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0) {
		goto fail_free_freeinit;
	}
	if (ret > 0) {
		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
			"follow 0/-E convention\n"
			"%s: loading module anyway...\n",
			__func__, mod->name, ret, __func__);
		dump_stack();
	}

	/* Now it's a first class citizen! */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/* Delay uevent until module has finished its init routine */
	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);

	/*
	 * We need to finish all async code before the module init sequence
	 * is done. This has potential to deadlock if synchronous module
	 * loading is requested from async (which is not allowed!).
	 *
	 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
	 * request_module() from async workers") for more details.
	 */
	if (!mod->async_probe_requested)
		async_synchronize_full();

	ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base,
			mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size);
	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
	ret = module_enable_rodata_ro(mod, true);
	if (ret)
		goto fail_mutex_unlock;
	mod_tree_remove_init(mod);
	module_arch_freeing_init(mod);
	for_class_mod_mem_type(type, init) {
		mod->mem[type].base = NULL;
		mod->mem[type].size = 0;
	}

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
	/* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */
	mod->btf_data = NULL;
	mod->btf_base_data = NULL;
#endif
	/*
	 * We want to free module_init, but be aware that kallsyms may be
	 * walking this with preempt disabled. In all the failure paths, we
	 * call synchronize_rcu(), but we don't want to slow down the success
	 * path. execmem_free() cannot be called in an interrupt, so do the
	 * work and call synchronize_rcu() in a work queue.
	 *
	 * Note that execmem_alloc() on most architectures creates W+X page
	 * mappings which won't be cleaned up until do_free_init() runs. Any
	 * code such as mark_rodata_ro() which depends on those mappings to
	 * be cleaned up needs to sync with the queued work by invoking
	 * flush_module_init_free_work().
	 */
	if (llist_add(&freeinit->node, &init_free_list))
		schedule_work(&init_free_wq);

	mutex_unlock(&module_mutex);
	wake_up_all(&module_wq);

	mod_stat_add_long(text_size, &total_text_size);
	mod_stat_add_long(total_size, &total_mod_size);

	mod_stat_inc(&modcount);

	return 0;

fail_mutex_unlock:
	mutex_unlock(&module_mutex);
fail_free_freeinit:
	kfree(freeinit);
fail:
	/* Try to protect us from buggy refcounters. */
	mod->state = MODULE_STATE_GOING;
	synchronize_rcu();
	module_put(mod);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);
	free_module(mod);
	wake_up_all(&module_wq);

	return ret;
}
static int may_init_module(void)
{
	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	return 0;
}
/* Is this module of this name done loading? No locks held. */
static bool finished_loading(const char *name)
{
	struct module *mod;
	bool ret;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	mod = find_module_all(name, strlen(name), true);
	ret = !mod || mod->state == MODULE_STATE_LIVE
		|| mod->state == MODULE_STATE_GOING;
	mutex_unlock(&module_mutex);

	return ret;
}
/* Must be called with module_mutex held */
static int module_patient_check_exists(const char *name,
				       enum fail_dup_mod_reason reason)
{
	struct module *old;
	int err = 0;

	old = find_module_all(name, strlen(name), true);
	if (old == NULL)
		return 0;

	if (old->state == MODULE_STATE_COMING ||
	    old->state == MODULE_STATE_UNFORMED) {
		/* Wait in case it fails to load. */
		mutex_unlock(&module_mutex);
		err = wait_event_interruptible(module_wq,
					       finished_loading(name));
		mutex_lock(&module_mutex);
		if (err)
			return err;

		/* The module might have gone in the meantime. */
		old = find_module_all(name, strlen(name), true);
	}

	if (try_add_failed_module(name, reason))
		pr_warn("Could not add fail-tracking for module: %s\n", name);

	/*
	 * We are here only when the same module was being loaded. Do
	 * not try to load it again right now. It prevents long delays
	 * caused by serialized module load failures. It might happen
	 * when more devices of the same type trigger load of
	 * a particular module.
	 */
	if (old && old->state == MODULE_STATE_LIVE)
		return -EEXIST;
	return -EBUSY;
}
/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources. In particular, temporary percpu
 * memory exhaustion.
 */
static int add_unformed_module(struct module *mod)
{
	int err;

	mod->state = MODULE_STATE_UNFORMED;

	mutex_lock(&module_mutex);
	err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD);
	if (err)
		goto out;

	mod_update_bounds(mod);
	list_add_rcu(&mod->list, &modules);
	mod_tree_insert(mod);
	err = 0;

out:
	mutex_unlock(&module_mutex);
	return err;
}
static int complete_formation(struct module *mod, struct load_info *info)
{
	int err;

	mutex_lock(&module_mutex);

	/* Find duplicate symbols (must be called under lock). */
	err = verify_exported_symbols(mod);
	if (err < 0)
		goto out;

	/* These rely on module_mutex for list integrity. */
	module_bug_finalize(info->hdr, info->sechdrs, mod);
	module_cfi_finalize(info->hdr, info->sechdrs, mod);

	err = module_enable_rodata_ro(mod, false);
	if (err)
		goto out_strict_rwx;
	err = module_enable_data_nx(mod);
	if (err)
		goto out_strict_rwx;
	err = module_enable_text_rox(mod);
	if (err)
		goto out_strict_rwx;

	/*
	 * Mark state as coming so strong_try_module_get() ignores us,
	 * but kallsyms etc. can see us.
	 */
	mod->state = MODULE_STATE_COMING;
	mutex_unlock(&module_mutex);

	return 0;

out_strict_rwx:
	module_bug_cleanup(mod);
out:
	mutex_unlock(&module_mutex);
	return err;
}
static int prepare_coming_module(struct module *mod)
{
	int err;

	ftrace_module_enable(mod);
	err = klp_module_coming(mod);
	if (err)
		return err;

	err = blocking_notifier_call_chain_robust(&module_notify_list,
			MODULE_STATE_COMING, MODULE_STATE_GOING, mod);
	err = notifier_to_errno(err);
	if (err)
		klp_module_going(mod);

	return err;
}
static int unknown_module_param_cb(char *param, char *val, const char *modname,
				   void *arg)
{
	struct module *mod = arg;
	int ret;

	if (strcmp(param, "async_probe") == 0) {
		if (kstrtobool(val, &mod->async_probe_requested))
			mod->async_probe_requested = true;
		return 0;
	}

	/* Check for magic 'dyndbg' arg */
	ret = ddebug_dyndbg_module_param_cb(param, val, modname);
	if (ret != 0)
		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
	return 0;
}
/* Module within temporary copy, this doesn't do any allocation */
static int early_mod_check(struct load_info *info, int flags)
{
	int err;

	/*
	 * Now that we know we have the correct module name, check
	 * if it's blacklisted.
	 */
	if (blacklisted(info->name)) {
		pr_err("Module %s is blacklisted\n", info->name);
		return -EPERM;
	}

	err = rewrite_section_headers(info, flags);
	if (err)
		return err;

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info, info->mod))
		return -ENOEXEC;

	err = check_modinfo(info->mod, info, flags);
	if (err)
		return err;

	mutex_lock(&module_mutex);
	err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING);
	mutex_unlock(&module_mutex);

	return err;
}
/*
 * Allocate and load the module: note that size of section 0 is always
 * zero, and we rely on this for optional sections.
 */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	bool module_allocated = false;
	long err = 0;
	char *after_dashes;

	/*
	 * Do the signature check (if any) first. All that
	 * the signature check needs is info->len, it does
	 * not need any of the section info. That can be
	 * set up later. This will minimize the chances
	 * of a corrupt module causing problems before
	 * we even get to the signature check.
	 *
	 * The check will also adjust info->len by stripping
	 * off the sig length at the end of the module, making
	 * checks against info->len more correct.
	 */
	err = module_sig_check(info, flags);
	if (err)
		goto free_copy;

	/*
	 * Do basic sanity checks against the ELF header and
	 * sections. Cache useful sections and set the
	 * info->mod to the userspace passed struct module.
	 */
	err = elf_validity_cache_copy(info, flags);
	if (err)
		goto free_copy;

	err = early_mod_check(info, flags);
	if (err)
		goto free_copy;

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	module_allocated = true;

	audit_log_kern_module(mod->name);

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

	/*
	 * We are tainting your kernel if your module gets into
	 * the modules linked list somehow.
	 */
	module_augment_kernel_taints(mod, info);

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	init_param_lock(mod);

	/*
	 * Now we've got everything in the final locations, we can
	 * find optional sections.
	 */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_export_symbol_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	init_build_id(mod, info);

	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
	ftrace_module_init(mod);

	/* Finally it's fully formed, ready to start executing. */
	err = complete_formation(mod, info);
	if (err)
		goto ddebug_cleanup;

	err = prepare_coming_module(mod);
	if (err)
		goto bug_cleanup;

	mod->async_probe_requested = async_probe;

	/* Module is ready to execute: parsing args may do that. */
	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
				  -32768, 32767, mod,
				  unknown_module_param_cb);
	if (IS_ERR(after_dashes)) {
		err = PTR_ERR(after_dashes);
		goto coming_cleanup;
	} else if (after_dashes) {
		pr_warn("%s: parameters '%s' after `--' ignored\n",
			mod->name, after_dashes);
	}

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto coming_cleanup;

	if (is_livepatch_module(mod)) {
		err = copy_module_elf(mod, info);
		if (err < 0)
			goto sysfs_cleanup;
	}

	/* Get rid of temporary copy. */
	free_copy(info, flags);

	codetag_load_module(mod);

	/* Done! */
	trace_module_load(mod);

	return do_init_module(mod);

 sysfs_cleanup:
	mod_sysfs_teardown(mod);
 coming_cleanup:
	mod->state = MODULE_STATE_GOING;
	destroy_params(mod->kp, mod->num_kp);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
 bug_cleanup:
	mod->state = MODULE_STATE_GOING;
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

 ddebug_cleanup:
	ftrace_release_mod(mod);
	synchronize_rcu();
	kfree(mod->args);
 free_arch_cleanup:
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	wake_up_all(&module_wq);
	/* Wait for RCU-sched synchronizing before releasing mod->list. */
	synchronize_rcu();
	mutex_unlock(&module_mutex);
 free_module:
	mod_stat_bump_invalid(info, flags);
	/* Free lock-classes; relies on the preceding sync_rcu() */
	for_class_mod_mem_type(type, core_data) {
		lockdep_free_key_range(mod->mem[type].base,
				       mod->mem[type].size);
	}

	module_deallocate(mod, info);
 free_copy:
	/*
	 * The info->len is always set. We distinguish between
	 * failures once the proper module was allocated and
	 * failures before that.
	 */
	if (!module_allocated)
		mod_stat_bump_becoming(info, flags);
	free_copy(info, flags);
	return err;
}
SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
		 umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err) {
		mod_stat_inc(&failed_kreads);
		mod_stat_add_long(len, &invalid_kread_bytes);
		return err;
	}

	return load_module(&info, uargs, 0);
}
struct idempotent {
	const void *cookie;
	struct hlist_node entry;
	struct completion complete;
	int ret;
};

#define IDEM_HASH_BITS 8
static struct hlist_head idem_hash[1 << IDEM_HASH_BITS];
static DEFINE_SPINLOCK(idem_lock);
static bool idempotent(struct idempotent *u, const void *cookie)
{
	int hash = hash_ptr(cookie, IDEM_HASH_BITS);
	struct hlist_head *head = idem_hash + hash;
	struct idempotent *existing;
	bool first;

	u->ret = -EINTR;
	u->cookie = cookie;
	init_completion(&u->complete);

	spin_lock(&idem_lock);
	first = true;
	hlist_for_each_entry(existing, head, entry) {
		if (existing->cookie != cookie)
			continue;
		first = false;
		break;
	}
	hlist_add_head(&u->entry, idem_hash + hash);
	spin_unlock(&idem_lock);

	return !first;
}
/*
 * We were the first one with 'cookie' on the list, and we ended
 * up completing the operation. We now need to walk the list,
 * remove everybody - which includes ourselves - fill in the return
 * value, and then complete the operation.
 */
static int idempotent_complete(struct idempotent *u, int ret)
{
	const void *cookie = u->cookie;
	int hash = hash_ptr(cookie, IDEM_HASH_BITS);
	struct hlist_head *head = idem_hash + hash;
	struct hlist_node *next;
	struct idempotent *pos;

	spin_lock(&idem_lock);
	hlist_for_each_entry_safe(pos, next, head, entry) {
		if (pos->cookie != cookie)
			continue;
		hlist_del_init(&pos->entry);
		pos->ret = ret;
		complete(&pos->complete);
	}
	spin_unlock(&idem_lock);
	return ret;
}
/*
 * Wait for the idempotent worker.
 *
 * If we get interrupted, we need to remove ourselves from the
 * idempotent list, and the completion may still come in.
 *
 * The 'idem_lock' protects against the race, and 'idem.ret' was
 * initialized to -EINTR and is thus always the right return
 * value even if the idempotent work then completes between
 * the wait_for_completion and the cleanup.
 */
static int idempotent_wait_for_completion(struct idempotent *u)
{
	if (wait_for_completion_interruptible(&u->complete)) {
		spin_lock(&idem_lock);
		if (!hlist_unhashed(&u->entry))
			hlist_del(&u->entry);
		spin_unlock(&idem_lock);
	}
	return u->ret;
}
static int init_module_from_file(struct file *f, const char __user * uargs, int flags)
{
	struct load_info info = { };
	void *buf = NULL;
	int len;

	len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE);
	if (len < 0) {
		mod_stat_inc(&failed_kreads);
		return len;
	}

	if (flags & MODULE_INIT_COMPRESSED_FILE) {
		int err = module_decompress(&info, buf, len);
		vfree(buf); /* compressed data is no longer needed */
		if (err) {
			mod_stat_inc(&failed_decompress);
			mod_stat_add_long(len, &invalid_decompress_bytes);
			return err;
		}
	} else {
		info.hdr = buf;
		info.len = len;
	}

	return load_module(&info, uargs, flags);
}
static int idempotent_init_module(struct file *f, const char __user * uargs, int flags)
{
	struct idempotent idem;

	if (!(f->f_mode & FMODE_READ))
		return -EBADF;

	/* Are we the winners of the race and get to do this? */
	if (!idempotent(&idem, file_inode(f))) {
		int ret = init_module_from_file(f, uargs, flags);
		return idempotent_complete(&idem, ret);
	}

	/*
	 * Somebody else won the race and is loading the module.
	 */
	return idempotent_wait_for_completion(&idem);
}
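/*
 * The idempotence cookie is the file's inode, so several tasks racing to
 * finit_module() the same file do the expensive read/decompress/load only
 * once; the losers simply wait for, and share, the winner's return value.
 */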
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	int err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC
		      |MODULE_INIT_COMPRESSED_FILE))
		return -EINVAL;

	CLASS(fd, f)(fd);
	if (fd_empty(f))
		return -EBADF;

	return idempotent_init_module(fd_file(f), uargs, flags);
}
/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
char *module_flags(struct module *mod, char *buf, bool show_state)
{
	int bx = 0;

	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
	if (!mod->taints && !show_state)
		goto out;
	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		bx += module_flags_taint(mod->taints, buf + bx);
		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING && show_state)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING && show_state)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
out:
	buf[bx] = '\0';

	return buf;
}
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct module *mod;

	preempt_disable();
	mod = __module_address(addr);
	if (!mod)
		goto out;

	if (!mod->num_exentries)
		goto out;

	e = search_extable(mod->extable,
			   mod->num_exentries,
			   addr);
out:
	preempt_enable();

	/*
	 * Now, if we found one, we are running inside it now, hence
	 * we cannot unload the module, hence no refcnt needed.
	 */
	return e;
}
/**
 * is_module_address() - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_address(addr) != NULL;
	preempt_enable();

	return ret;
}
/**
 * __module_address() - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max)
		goto lookup;

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max)
		goto lookup;
#endif

	return NULL;

lookup:
	module_assert_mutex_or_preempt();

	mod = mod_find(addr, &mod_tree);
	if (mod) {
		BUG_ON(!within_module(addr, mod));
		if (mod->state == MODULE_STATE_UNFORMED)
			mod = NULL;
	}
	return mod;
}
/**
 * is_module_text_address() - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module. See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_text_address(addr) != NULL;
	preempt_enable();

	return ret;
}
/**
 * __module_text_address() - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod = __module_address(addr);
	if (mod) {
		/* Make sure it's within the text section. */
		if (!within_module_mem_type(addr, mod, MOD_TEXT) &&
		    !within_module_mem_type(addr, mod, MOD_INIT_TEXT))
			mod = NULL;
	}
	return mod;
}
/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
	struct module *mod;
	char buf[MODULE_FLAGS_BUF_SIZE];

	printk(KERN_DEFAULT "Modules linked in:");
	/* Most callers should already have preempt disabled, but make sure */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		pr_cont(" %s%s", mod->name, module_flags(mod, buf, true));
	}

	print_unloaded_tainted_modules();
	preempt_enable();
	if (last_unloaded_module.name[0])
		pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name,
			last_unloaded_module.taints);
	pr_cont("\n");
}
#ifdef CONFIG_MODULE_DEBUGFS
struct dentry *mod_debugfs_root;

static int module_debugfs_init(void)
{
	mod_debugfs_root = debugfs_create_dir("modules", NULL);
	return 0;
}
module_init(module_debugfs_init);
#endif