/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the
 * klp_ops struct's func_stack list.  The winner is the klp_func at the top
 * of the func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
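/*
 * For illustration only (a sketch, not runtime logic): if two hypothetical
 * patches p1 and p2 both replace the same function, their klp_func structs
 * share one klp_ops and stack up on its func_stack:
 *
 *	ops->func_stack: p2_func -> p1_func
 *
 * klp_ftrace_handler() always redirects to the entry at the front of the
 * list (p2's version); disabling p2 pops its klp_func off the stack and
 * p1's version immediately takes over again.
 */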
/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be
 * obtained under mutex protection (except in klp_ftrace_handler(), which
 * uses RCU to ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;
static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}
static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics of
	 * the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}
/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}
struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}
static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is
	 * unique; otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
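/*
 * Example of the livepatch symbol-name format parsed above: a reference to
 * the unique kallsyms occurrence of printk in vmlinux is named
 *
 *	.klp.sym.vmlinux.printk,0
 *
 * and klp_resolve_symbols() rewrites its st_value with the looked-up
 * address.
 */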
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}
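/*
 * Example of the section-name format parsed above: relocations against
 * unexported ext4 symbols would live in a section such as
 *
 *	.klp.rela.ext4.text.unlikely
 *
 * which is applied only once the ext4 module (the "ext4" object) is loaded.
 */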
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
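/*
 * Illustration of the redirection performed by the handler above, assuming
 * a patched function old_fn() and its replacement new_fn():
 *
 *	caller -> old_fn()'s ftrace trampoline -> klp_ftrace_handler()
 *	       -> klp_arch_set_pc(regs, new_fn)
 *	       -> trampoline "returns" into new_fn() -> back to caller
 *
 * The body of old_fn() itself is never executed while the patch is enabled.
 */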
/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
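/*
 * For example, on x86 the ftrace location is the function address itself,
 * so the generic version above suffices; architectures such as powerpc64
 * override klp_get_ftrace_location because the profiling call site does
 * not sit at the function's very first instruction.
 */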
static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}
static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}
static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}
/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}
/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
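/*
 * Example usage from user space (assuming a loaded patch module named
 * livepatch_sample):
 *
 *	# cat /sys/kernel/livepatch/livepatch_sample/enabled
 *	1
 *	# echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 */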
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};
static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}
/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}
/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}
static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object. If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}
/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}
static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
	mutex_unlock(&klp_mutex);
	return ret;
}
/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);
/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
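/*
 * For reference, a minimal patch module built on this API (a sketch modeled
 * on samples/livepatch/livepatch-sample.c; the patched function and its
 * replacement are illustrative).  Leaving obj->name NULL targets vmlinux:
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 *	module_init(livepatch_init);
 */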
int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			if (patch->state == KLP_DISABLED)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_enable_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}
void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch->state != KLP_DISABLED) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_disable_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}
static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);