/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the
 * klp_ops struct's func_stack list.  The winner is the klp_func at the top
 * of the func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};

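/*
 * Illustration (a sketch, not code from this file; the patch names are
 * hypothetical): if patch1 and patch2 both replace the function at the same
 * old_addr, their klp_func entries share one klp_ops and stack up on
 * func_stack, newest first:
 *
 *	ops->func_stack:  func_from_patch2 -> func_from_patch1
 *
 * klp_ftrace_handler() redirects every call from old_addr to
 * func_from_patch2->new_func.  Disabling patch2 removes its entry from the
 * stack, after which calls land in func_from_patch1->new_func instead; no
 * call site ever has to be rewritten for the switch.
 */
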
/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be
 * obtained under mutex protection (except in klp_ftrace_handler(), which
 * uses RCU to ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics of
	 * functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

/* data/result buffer passed to the kallsyms_on_each_symbol() callback */
struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is
	 * unique; otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

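/*
 * Examples of the symbol-name convention parsed above (the names are
 * illustrative): a livepatch symbol resolving to the unique vmlinux symbol
 * printk would be named
 *
 *	.klp.sym.vmlinux.printk,0
 *
 * while one resolving to the second occurrence of do_work in module my_mod
 * would be named
 *
 *	.klp.sym.my_mod.do_work,2
 */
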
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	module_disable_ro(pmod);
	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			goto out;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			goto out;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			goto out;
	}

out:
	module_enable_ro(pmod);
	return ret;
}

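/*
 * Example of the section-name convention handled above (a sketch; the
 * section and module names are illustrative): relocations that a patch
 * module's .text needs against vmlinux symbols would live in a section
 * named
 *
 *	.klp.rela.vmlinux.text
 *
 * and those against symbols of module my_mod in
 *
 *	.klp.rela.my_mod.text
 */
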
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

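/*
 * A minimal sketch of how an architecture header might override this hook
 * when the ftrace call site does not sit at the function's entry address.
 * The 16-byte window is an assumption for illustration, not taken from this
 * file; ftrace_location_range() looks up a patchable call site within the
 * given address range:
 *
 *	#define klp_get_ftrace_location klp_get_ftrace_location
 *	static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
 *	{
 *		return ftrace_location_range(faddr, faddr + 16);
 *	}
 */
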
static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */

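/*
 * Example session (illustrative; the patch module name is hypothetical):
 *
 *	# insmod livepatch_sample.ko
 *	# cat /sys/kernel/livepatch/livepatch_sample/enabled
 *	1
 *	# echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 */
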
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here. See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}

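/*
 * Example (hypothetical names): a klp_func with old_name "do_work" and
 * old_sympos 0 gets the sysfs directory
 *
 *	/sys/kernel/livepatch/<patch>/<object>/do_work,1
 */
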
/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded. Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);

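/*
 * A minimal sketch of a patch module built on this API (hedged: modeled on
 * the in-tree livepatch sample; the patched function and all names below
 * are illustrative assumptions):
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			// NULL name means the object is vmlinux
 *			.name = NULL,
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 */
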
int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			if (patch->state == KLP_DISABLED)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_enable_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch->state != KLP_DISABLED) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_disable_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);