// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "state.h"
#include "transition.h"
/*
 * klp_mutex is a coarse lock which serializes access to klp data.  All
 * accesses to klp-related variables and structures must have mutex
 * protection, except within the following functions which carefully avoid
 * the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);
/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module may still be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;
static bool klp_is_module(struct klp_object *obj)
{
        return obj->name;
}
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
        struct module *mod;

        if (!klp_is_module(obj))
                return;

        mutex_lock(&module_mutex);
        /*
         * We do not want to block removal of patched modules and therefore
         * we do not take a reference here. The patches are removed by
         * klp_module_going() instead.
         */
        mod = find_module(obj->name);
        /*
         * Do not interfere with the work of klp_module_coming() and
         * klp_module_going(). Note that the patch might still be needed
         * before klp_module_going() is called. Module functions can be
         * called even in the GOING state until mod->exit() finishes. This
         * is especially important for patches that modify the semantics of
         * the functions.
         */
        if (mod && mod->klp_alive)
                obj->mod = mod;

        mutex_unlock(&module_mutex);
}
static bool klp_initialized(void)
{
        return !!klp_root_kobj;
}
static struct klp_func *klp_find_func(struct klp_object *obj,
                                      struct klp_func *old_func)
{
        struct klp_func *func;

        klp_for_each_func(obj, func) {
                if ((strcmp(old_func->old_name, func->old_name) == 0) &&
                    (old_func->old_sympos == func->old_sympos)) {
                        return func;
                }
        }

        return NULL;
}
static struct klp_object *klp_find_object(struct klp_patch *patch,
                                          struct klp_object *old_obj)
{
        struct klp_object *obj;

        klp_for_each_object(patch, obj) {
                if (klp_is_module(old_obj)) {
                        if (klp_is_module(obj) &&
                            strcmp(old_obj->name, obj->name) == 0) {
                                return obj;
                        }
                } else if (!klp_is_module(obj)) {
                        return obj;
                }
        }

        return NULL;
}
struct klp_find_arg {
        const char *objname;
        const char *name;
        unsigned long addr;
        unsigned long count;
        unsigned long pos;
};
static int klp_find_callback(void *data, const char *name,
                             struct module *mod, unsigned long addr)
{
        struct klp_find_arg *args = data;

        if ((mod && !args->objname) || (!mod && args->objname))
                return 0;

        if (strcmp(args->name, name))
                return 0;

        if (args->objname && strcmp(args->objname, mod->name))
                return 0;

        args->addr = addr;
        args->count++;

        /*
         * Finish the search when the symbol is found for the desired position
         * or the position is not defined for a non-unique symbol.
         */
        if ((args->pos && (args->count == args->pos)) ||
            (!args->pos && (args->count > 1)))
                return 1;

        return 0;
}
static int klp_find_object_symbol(const char *objname, const char *name,
                                  unsigned long sympos, unsigned long *addr)
{
        struct klp_find_arg args = {
                .objname = objname,
                .name = name,
                .addr = 0,
                .count = 0,
                .pos = sympos,
        };

        mutex_lock(&module_mutex);
        if (objname)
                module_kallsyms_on_each_symbol(klp_find_callback, &args);
        else
                kallsyms_on_each_symbol(klp_find_callback, &args);
        mutex_unlock(&module_mutex);

        /*
         * Ensure an address was found. If sympos is 0, ensure symbol is unique;
         * otherwise ensure the symbol position count matches sympos.
         */
        if (args.addr == 0)
                pr_err("symbol '%s' not found in symbol table\n", name);
        else if (args.count > 1 && sympos == 0) {
                pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
                       name, objname);
        } else if (sympos != args.count && sympos > 0) {
                pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
                       sympos, name, objname ? objname : "vmlinux");
        } else {
                *addr = args.addr;
                return 0;
        }

        *addr = 0;
        return -EINVAL;
}
static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
                               unsigned int symndx, Elf_Shdr *relasec,
                               const char *sec_objname)
{
        int i, cnt, ret;
        char sym_objname[MODULE_NAME_LEN];
        char sym_name[KSYM_NAME_LEN];
        Elf_Rela *relas;
        Elf_Sym *sym;
        unsigned long sympos, addr;
        bool sym_vmlinux;
        bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");

        /*
         * Since the field widths for sym_objname and sym_name in the sscanf()
         * call are hard-coded and correspond to MODULE_NAME_LEN and
         * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
         * and KSYM_NAME_LEN have the values we expect them to have.
         *
         * Because the value of MODULE_NAME_LEN can differ among architectures,
         * we use the smallest/strictest upper bound possible (56, based on
         * the current definition of MODULE_NAME_LEN) to prevent overflows.
         */
        BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

        relas = (Elf_Rela *) relasec->sh_addr;
        /* For each rela in this klp relocation section */
        for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
                sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
                if (sym->st_shndx != SHN_LIVEPATCH) {
                        pr_err("symbol %s is not marked as a livepatch symbol\n",
                               strtab + sym->st_name);
                        return -EINVAL;
                }

                /* Format: .klp.sym.sym_objname.sym_name,sympos */
                cnt = sscanf(strtab + sym->st_name,
                             ".klp.sym.%55[^.].%127[^,],%lu",
                             sym_objname, sym_name, &sympos);
                if (cnt != 3) {
                        pr_err("symbol %s has an incorrectly formatted name\n",
                               strtab + sym->st_name);
                        return -EINVAL;
                }

                sym_vmlinux = !strcmp(sym_objname, "vmlinux");

                /*
                 * Prevent module-specific KLP rela sections from referencing
                 * vmlinux symbols.  This helps prevent ordering issues with
                 * module special section initializations.  Presumably such
                 * symbols are exported and normal relas can be used instead.
                 */
                if (!sec_vmlinux && sym_vmlinux) {
                        pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
                               sym_name);
                        return -EINVAL;
                }

                /* klp_find_object_symbol() treats a NULL objname as vmlinux */
                ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
                                             sym_name, sympos, &addr);
                if (ret)
                        return ret;

                sym->st_value = addr;
        }

        return 0;
}
/*
 * At a high-level, there are two types of klp relocation sections: those which
 * reference symbols which live in vmlinux; and those which reference symbols
 * which live in other modules.  This function is called for both types:
 *
 * 1) When a klp module itself loads, the module code calls this function to
 *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
 *    These relocations are written to the klp module text to allow the
 *    patched code/data to reference unexported vmlinux symbols.  They're
 *    written as early as possible to ensure that other module init code
 *    (e.g., jump_label_apply_nops) can access any unexported vmlinux symbols
 *    which might be referenced by the klp module's special sections.
 *
 * 2) When a to-be-patched module loads -- or is already loaded when a
 *    corresponding klp module loads -- klp code calls this function to write
 *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
 *    are written to the klp module text to allow the patched code/data to
 *    reference symbols which live in the to-be-patched module or one of its
 *    module dependencies.  Exported symbols are supported, in addition to
 *    unexported symbols, in order to enable late module patching, which
 *    allows the to-be-patched module to be loaded and patched sometime
 *    *after* the klp module is loaded.
 */
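/*
 * For illustration only (object and section names are examples, not taken
 * from this file): a klp module patching ext4 might carry a relocation
 * section named ".klp.rela.ext4.text.unlikely" whose relocations reference
 * livepatch symbols named like ".klp.sym.ext4.ext4_attr_store,0".  The
 * function below resolves such symbols and applies the relocations.
 */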
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
                             const char *shstrtab, const char *strtab,
                             unsigned int symndx, unsigned int secndx,
                             const char *objname)
{
        int cnt, ret;
        char sec_objname[MODULE_NAME_LEN];
        Elf_Shdr *sec = sechdrs + secndx;

        /*
         * Format: .klp.rela.sec_objname.section_name
         * See comment in klp_resolve_symbols() for an explanation
         * of the selected field width value.
         */
        cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
                     sec_objname);
        if (cnt != 1) {
                pr_err("section %s has an incorrectly formatted name\n",
                       shstrtab + sec->sh_name);
                return -EINVAL;
        }

        if (strcmp(objname ? objname : "vmlinux", sec_objname))
                return 0;

        ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
        if (ret)
                return ret;

        return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
}
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
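/*
 * Example usage from userspace (illustrative; <patch> is the name of the
 * loaded livepatch module):
 *
 *	cat /sys/kernel/livepatch/<patch>/enabled	# 1 if the patch is enabled
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled	# start disabling the patch
 *	cat /sys/kernel/livepatch/<patch>/transition	# 1 while a transition is in progress
 *	echo 1 > /sys/kernel/livepatch/<patch>/force	# last resort: force a stuck transition
 */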
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
                             const char *buf, size_t count)
{
        struct klp_patch *patch;
        bool enabled;
        int ret;

        ret = kstrtobool(buf, &enabled);
        if (ret)
                return ret;

        patch = container_of(kobj, struct klp_patch, kobj);

        mutex_lock(&klp_mutex);

        if (patch->enabled == enabled) {
                /* already in requested state */
                ret = -EINVAL;
                goto out;
        }

        /*
         * Allow a pending transition to be reversed in either direction.
         * It might be necessary to complete the transition without forcing
         * it and breaking the system integrity.
         *
         * Do not allow a disabled patch to be re-enabled.
         */
        if (patch == klp_transition_patch)
                klp_reverse_transition();
        else if (!enabled)
                ret = __klp_disable_patch(patch);
        else
                ret = -EINVAL;

out:
        mutex_unlock(&klp_mutex);

        if (ret)
                return ret;
        return count;
}
static ssize_t enabled_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}
static ssize_t transition_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        return snprintf(buf, PAGE_SIZE-1, "%d\n",
                        patch == klp_transition_patch);
}
static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
                           const char *buf, size_t count)
{
        struct klp_patch *patch;
        bool val;
        int ret;

        ret = kstrtobool(buf, &val);
        if (ret)
                return ret;

        if (!val)
                return count;

        mutex_lock(&klp_mutex);

        patch = container_of(kobj, struct klp_patch, kobj);
        if (patch != klp_transition_patch) {
                mutex_unlock(&klp_mutex);
                return -EINVAL;
        }

        klp_force_transition();

        mutex_unlock(&klp_mutex);

        return count;
}
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
        &enabled_kobj_attr.attr,
        &transition_kobj_attr.attr,
        &force_kobj_attr.attr,
        NULL
};
ATTRIBUTE_GROUPS(klp_patch);
static void klp_free_object_dynamic(struct klp_object *obj)
{
        kfree(obj->name);
        kfree(obj);
}
static void klp_init_func_early(struct klp_object *obj,
                                struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
                                  struct klp_object *obj);
static struct klp_object *klp_alloc_object_dynamic(const char *name,
                                                   struct klp_patch *patch)
{
        struct klp_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return NULL;

        if (name) {
                obj->name = kstrdup(name, GFP_KERNEL);
                if (!obj->name) {
                        kfree(obj);
                        return NULL;
                }
        }

        klp_init_object_early(patch, obj);
        obj->dynamic = true;

        return obj;
}
static void klp_free_func_nop(struct klp_func *func)
{
        kfree(func->old_name);
        kfree(func);
}
static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
                                           struct klp_object *obj)
{
        struct klp_func *func;

        func = kzalloc(sizeof(*func), GFP_KERNEL);
        if (!func)
                return NULL;

        if (old_func->old_name) {
                func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
                if (!func->old_name) {
                        kfree(func);
                        return NULL;
                }
        }

        klp_init_func_early(obj, func);
        /*
         * func->new_func is the same as func->old_func. These addresses are
         * set when the object is loaded, see klp_init_object_loaded().
         */
        func->old_sympos = old_func->old_sympos;
        func->nop = true;

        return func;
}
static int klp_add_object_nops(struct klp_patch *patch,
                               struct klp_object *old_obj)
{
        struct klp_object *obj;
        struct klp_func *func, *old_func;

        obj = klp_find_object(patch, old_obj);

        if (!obj) {
                obj = klp_alloc_object_dynamic(old_obj->name, patch);
                if (!obj)
                        return -ENOMEM;
        }

        klp_for_each_func(old_obj, old_func) {
                func = klp_find_func(obj, old_func);
                if (func)
                        continue;

                func = klp_alloc_func_nop(old_func, obj);
                if (!func)
                        return -ENOMEM;
        }

        return 0;
}
/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
        struct klp_patch *old_patch;
        struct klp_object *old_obj;

        klp_for_each_patch(old_patch) {
                klp_for_each_object(old_patch, old_obj) {
                        int err;

                        err = klp_add_object_nops(patch, old_obj);
                        if (err)
                                return err;
                }
        }

        return 0;
}
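/*
 * A livepatch that wants atomic replace only sets the flag in its patch
 * definition; klp_add_nops() takes care of the rest.  Illustrative sketch
 * (objs is a placeholder for the caller's object array):
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *		.replace = true,
 *	};
 */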
static void klp_kobj_release_patch(struct kobject *kobj)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
        .release = klp_kobj_release_patch,
        .sysfs_ops = &kobj_sysfs_ops,
        .default_groups = klp_patch_groups,
};
static void klp_kobj_release_object(struct kobject *kobj)
{
        struct klp_object *obj;

        obj = container_of(kobj, struct klp_object, kobj);

        if (obj->dynamic)
                klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
        .release = klp_kobj_release_object,
        .sysfs_ops = &kobj_sysfs_ops,
};
static void klp_kobj_release_func(struct kobject *kobj)
{
        struct klp_func *func;

        func = container_of(kobj, struct klp_func, kobj);

        if (func->nop)
                klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
        .release = klp_kobj_release_func,
        .sysfs_ops = &kobj_sysfs_ops,
};
static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
        struct klp_func *func, *tmp_func;

        klp_for_each_func_safe(obj, func, tmp_func) {
                if (nops_only && !func->nop)
                        continue;

                list_del(&func->node);
                kobject_put(&func->kobj);
        }
}
/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
        struct klp_func *func;

        obj->mod = NULL;

        klp_for_each_func(obj, func) {
                func->old_func = NULL;

                if (func->nop)
                        func->new_func = NULL;
        }
}
static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
        struct klp_object *obj, *tmp_obj;

        klp_for_each_object_safe(patch, obj, tmp_obj) {
                __klp_free_funcs(obj, nops_only);

                if (nops_only && !obj->dynamic)
                        continue;

                list_del(&obj->node);
                kobject_put(&obj->kobj);
        }
}

static void klp_free_objects(struct klp_patch *patch)
{
        __klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
        __klp_free_objects(patch, true);
}
/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
static void klp_free_patch_start(struct klp_patch *patch)
{
        if (!list_empty(&patch->list))
                list_del(&patch->list);

        klp_free_objects(patch);
}
/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(). And it has to be
 * the last function accessing the livepatch structures when the patch
 * gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
        /*
         * Avoid deadlock with enabled_store() sysfs callback by
         * calling this outside klp_mutex. It is safe because
         * this is called when the patch gets disabled and it
         * cannot get enabled again.
         */
        kobject_put(&patch->kobj);
        wait_for_completion(&patch->finish);

        /* Put the module after the last access to struct klp_patch. */
        if (!patch->forced)
                module_put(patch->mod);
}
/*
 * The livepatch might be freed from the sysfs interface created by the patch
 * itself. This work allows waiting until the interface is destroyed in a
 * separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
        struct klp_patch *patch =
                container_of(work, struct klp_patch, free_work);

        klp_free_patch_finish(patch);
}

void klp_free_patch_async(struct klp_patch *patch)
{
        klp_free_patch_start(patch);
        schedule_work(&patch->free_work);
}
void klp_free_replaced_patches_async(struct klp_patch *new_patch)
{
        struct klp_patch *old_patch, *tmp_patch;

        klp_for_each_patch_safe(old_patch, tmp_patch) {
                if (old_patch == new_patch)
                        continue;
                klp_free_patch_async(old_patch);
        }
}
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
        if (!func->old_name)
                return -EINVAL;

        /*
         * NOPs get the address later. The patched module must be loaded,
         * see klp_init_object_loaded().
         */
        if (!func->new_func && !func->nop)
                return -EINVAL;

        if (strlen(func->old_name) >= KSYM_NAME_LEN)
                return -EINVAL;

        INIT_LIST_HEAD(&func->stack_node);
        func->patched = false;
        func->transition = false;

        /*
         * The format for the sysfs directory is <function,sympos> where sympos
         * is the nth occurrence of this symbol in kallsyms for the patched
         * object. If the user selects 0 for old_sympos, then 1 will be used
         * since a unique symbol will be the first occurrence.
         */
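        /*
         * For example (hypothetical patch and symbol names), patching the
         * unique vmlinux symbol cmdline_proc_show creates:
         *
         *	/sys/kernel/livepatch/<patch>/vmlinux/cmdline_proc_show,1
         */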
        return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
                           func->old_name,
                           func->old_sympos ? func->old_sympos : 1);
}
static int klp_apply_object_relocs(struct klp_patch *patch,
                                   struct klp_object *obj)
{
        int i, ret;
        struct klp_modinfo *info = patch->mod->klp_info;

        for (i = 1; i < info->hdr.e_shnum; i++) {
                Elf_Shdr *sec = info->sechdrs + i;

                if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
                        continue;

                ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
                                               info->secstrings,
                                               patch->mod->core_kallsyms.strtab,
                                               info->symndx, i, obj->name);
                if (ret)
                        return ret;
        }

        return 0;
}
/* Parts of the initialization that are done only when the object is loaded. */
static int klp_init_object_loaded(struct klp_patch *patch,
                                  struct klp_object *obj)
{
        struct klp_func *func;
        int ret;

        if (klp_is_module(obj)) {
                /*
                 * Only write module-specific relocations here
                 * (.klp.rela.{module}.*). vmlinux-specific relocations were
                 * written earlier during the initialization of the klp module
                 * itself.
                 */
                ret = klp_apply_object_relocs(patch, obj);
                if (ret)
                        return ret;
        }

        klp_for_each_func(obj, func) {
                ret = klp_find_object_symbol(obj->name, func->old_name,
                                             func->old_sympos,
                                             (unsigned long *)&func->old_func);
                if (ret)
                        return ret;

                ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
                                                  &func->old_size, NULL);
                if (!ret) {
                        pr_err("kallsyms size lookup failed for '%s'\n",
                               func->old_name);
                        return -ENOENT;
                }

                if (func->nop)
                        func->new_func = func->old_func;

                ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
                                                  &func->new_size, NULL);
                if (!ret) {
                        pr_err("kallsyms size lookup failed for '%s' replacement\n",
                               func->old_name);
                        return -ENOENT;
                }
        }

        return 0;
}
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
        struct klp_func *func;
        int ret;
        const char *name;

        if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
                return -EINVAL;

        obj->patched = false;
        obj->mod = NULL;

        klp_find_object_module(obj);

        name = klp_is_module(obj) ? obj->name : "vmlinux";
        ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
        if (ret)
                return ret;

        klp_for_each_func(obj, func) {
                ret = klp_init_func(obj, func);
                if (ret)
                        return ret;
        }

        if (klp_is_object_loaded(obj))
                ret = klp_init_object_loaded(patch, obj);

        return ret;
}
static void klp_init_func_early(struct klp_object *obj,
                                struct klp_func *func)
{
        kobject_init(&func->kobj, &klp_ktype_func);
        list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
                                  struct klp_object *obj)
{
        INIT_LIST_HEAD(&obj->func_list);
        kobject_init(&obj->kobj, &klp_ktype_object);
        list_add_tail(&obj->node, &patch->obj_list);
}
static int klp_init_patch_early(struct klp_patch *patch)
{
        struct klp_object *obj;
        struct klp_func *func;

        if (!patch->objs)
                return -EINVAL;

        INIT_LIST_HEAD(&patch->list);
        INIT_LIST_HEAD(&patch->obj_list);
        kobject_init(&patch->kobj, &klp_ktype_patch);
        patch->enabled = false;
        patch->forced = false;
        INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
        init_completion(&patch->finish);

        klp_for_each_object_static(patch, obj) {
                if (!obj->funcs)
                        return -EINVAL;

                klp_init_object_early(patch, obj);

                klp_for_each_func_static(obj, func) {
                        klp_init_func_early(obj, func);
                }
        }

        if (!try_module_get(patch->mod))
                return -ENODEV;

        return 0;
}
static int klp_init_patch(struct klp_patch *patch)
{
        struct klp_object *obj;
        int ret;

        ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
        if (ret)
                return ret;

        if (patch->replace) {
                ret = klp_add_nops(patch);
                if (ret)
                        return ret;
        }

        klp_for_each_object(patch, obj) {
                ret = klp_init_object(patch, obj);
                if (ret)
                        return ret;
        }

        list_add_tail(&patch->list, &klp_patches);

        return 0;
}
static int __klp_disable_patch(struct klp_patch *patch)
{
        struct klp_object *obj;

        if (WARN_ON(!patch->enabled))
                return -EINVAL;

        if (klp_transition_patch)
                return -EBUSY;

        klp_init_transition(patch, KLP_UNPATCHED);

        klp_for_each_object(patch, obj)
                if (obj->patched)
                        klp_pre_unpatch_callback(obj);

        /*
         * Enforce the order of the func->transition writes in
         * klp_init_transition() and the TIF_PATCH_PENDING writes in
         * klp_start_transition(). In the rare case where klp_ftrace_handler()
         * is called shortly after klp_update_patch_state() switches the task,
         * this ensures the handler sees that func->transition is set.
         */
        smp_wmb();

        klp_start_transition();
        patch->enabled = false;
        klp_try_complete_transition();

        return 0;
}
static int __klp_enable_patch(struct klp_patch *patch)
{
        struct klp_object *obj;
        int ret;

        if (klp_transition_patch)
                return -EBUSY;

        if (WARN_ON(patch->enabled))
                return -EINVAL;

        pr_notice("enabling patch '%s'\n", patch->mod->name);

        klp_init_transition(patch, KLP_PATCHED);

        /*
         * Enforce the order of the func->transition writes in
         * klp_init_transition() and the ops->func_stack writes in
         * klp_patch_object(), so that klp_ftrace_handler() will see the
         * func->transition updates before the handler is registered and the
         * new funcs become visible to the handler.
         */
        smp_wmb();

        klp_for_each_object(patch, obj) {
                if (!klp_is_object_loaded(obj))
                        continue;

                ret = klp_pre_patch_callback(obj);
                if (ret) {
                        pr_warn("pre-patch callback failed for object '%s'\n",
                                klp_is_module(obj) ? obj->name : "vmlinux");
                        goto err;
                }

                ret = klp_patch_object(obj);
                if (ret) {
                        pr_warn("failed to patch object '%s'\n",
                                klp_is_module(obj) ? obj->name : "vmlinux");
                        goto err;
                }
        }

        klp_start_transition();
        patch->enabled = true;
        klp_try_complete_transition();

        return 0;
err:
        pr_warn("failed to enable patch '%s'\n", patch->mod->name);

        klp_cancel_transition();
        return ret;
}
/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
        int ret;

        if (!patch || !patch->mod)
                return -EINVAL;

        if (!is_livepatch_module(patch->mod)) {
                pr_err("module %s is not marked as a livepatch module\n",
                       patch->mod->name);
                return -EINVAL;
        }

        if (!klp_initialized())
                return -ENODEV;

        if (!klp_have_reliable_stack()) {
                pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
                pr_warn("The livepatch transition may never complete.\n");
        }

        mutex_lock(&klp_mutex);

        if (!klp_is_patch_compatible(patch)) {
                pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
                       patch->mod->name);
                mutex_unlock(&klp_mutex);
                return -EINVAL;
        }

        ret = klp_init_patch_early(patch);
        if (ret) {
                mutex_unlock(&klp_mutex);
                return ret;
        }

        ret = klp_init_patch(patch);
        if (ret)
                goto err;

        ret = __klp_enable_patch(patch);
        if (ret)
                goto err;

        mutex_unlock(&klp_mutex);

        return 0;

err:
        klp_free_patch_start(patch);

        mutex_unlock(&klp_mutex);

        klp_free_patch_finish(patch);

        return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
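/*
 * Example caller: a minimal livepatch module enabling its patch from
 * module_init().  This is an illustrative sketch modelled on
 * samples/livepatch/livepatch-sample.c; the patched function, its
 * replacement and the module names are placeholders.
 *
 *	#include <linux/module.h>
 *	#include <linux/livepatch.h>
 *	#include <linux/seq_file.h>
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.name = NULL,	// NULL name means vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 *	MODULE_LICENSE("GPL");
 *	MODULE_INFO(livepatch, "Y");
 */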
/*
 * This function unpatches objects from the replaced livepatches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessed from the ftrace handler.
 * All functions are redirected by the klp_transition_patch. They either use
 * the new code or stay in the original code thanks to the special nop
 * function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. Also
 * this is handled transparently by patch->module_put.
 */
void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{
        struct klp_patch *old_patch;

        klp_for_each_patch(old_patch) {
                if (old_patch == new_patch)
                        break;

                old_patch->enabled = false;
                klp_unpatch_objects(old_patch);
        }
}
/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive here. NOPs do not change the existing
 * behavior except for adding unnecessary delay by the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
        klp_unpatch_objects_dynamic(klp_transition_patch);
        klp_free_objects_dynamic(klp_transition_patch);
}
/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
                                               struct klp_patch *limit)
{
        struct klp_patch *patch;
        struct klp_object *obj;

        klp_for_each_patch(patch) {
                if (patch == limit)
                        break;

                klp_for_each_object(patch, obj) {
                        if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
                                continue;

                        if (patch != klp_transition_patch)
                                klp_pre_unpatch_callback(obj);

                        pr_notice("reverting patch '%s' on unloading module '%s'\n",
                                  patch->mod->name, obj->mod->name);
                        klp_unpatch_object(obj);

                        klp_post_unpatch_callback(obj);

                        klp_free_object_loaded(obj);
                        break;
                }
        }
}
int klp_module_coming(struct module *mod)
{
        int ret;
        struct klp_patch *patch;
        struct klp_object *obj;

        if (WARN_ON(mod->state != MODULE_STATE_COMING))
                return -EINVAL;

        if (!strcmp(mod->name, "vmlinux")) {
                pr_err("vmlinux.ko: invalid module name\n");
                return -EINVAL;
        }

        mutex_lock(&klp_mutex);
        /*
         * Each module has to know that klp_module_coming()
         * has been called. We never know what module will
         * get patched by a new patch.
         */
        mod->klp_alive = true;

        klp_for_each_patch(patch) {
                klp_for_each_object(patch, obj) {
                        if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
                                continue;

                        obj->mod = mod;

                        ret = klp_init_object_loaded(patch, obj);
                        if (ret) {
                                pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
                                        patch->mod->name, obj->mod->name, ret);
                                goto err;
                        }

                        pr_notice("applying patch '%s' to loading module '%s'\n",
                                  patch->mod->name, obj->mod->name);

                        ret = klp_pre_patch_callback(obj);
                        if (ret) {
                                pr_warn("pre-patch callback failed for object '%s'\n",
                                        obj->name);
                                goto err;
                        }

                        ret = klp_patch_object(obj);
                        if (ret) {
                                pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
                                        patch->mod->name, obj->mod->name, ret);

                                klp_post_unpatch_callback(obj);
                                goto err;
                        }

                        if (patch != klp_transition_patch)
                                klp_post_patch_callback(obj);

                        break;
                }
        }

        mutex_unlock(&klp_mutex);

        return 0;

err:
        /*
         * If a patch is unsuccessfully applied, return
         * error to the module loader.
         */
        pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
                patch->mod->name, obj->mod->name, obj->mod->name);
        mod->klp_alive = false;
        obj->mod = NULL;
        klp_cleanup_module_patches_limited(mod, patch);
        mutex_unlock(&klp_mutex);

        return ret;
}
void klp_module_going(struct module *mod)
{
        if (WARN_ON(mod->state != MODULE_STATE_GOING &&
                    mod->state != MODULE_STATE_COMING))
                return;

        mutex_lock(&klp_mutex);
        /*
         * Each module has to know that klp_module_going()
         * has been called. We never know what module will
         * get patched by a new patch.
         */
        mod->klp_alive = false;

        klp_cleanup_module_patches_limited(mod, NULL);

        mutex_unlock(&klp_mutex);
}
static int __init klp_init(void)
{
        klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
        if (!klp_root_kobj)
                return -ENOMEM;

        return 0;
}

module_init(klp_init);