/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <asm/cacheflush.h>
/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
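
/*
 * Illustration (a hypothetical scenario, not code in this file): if two
 * enabled patches both replace a function foo(), they share one klp_ops
 * keyed by foo()'s old_addr:
 *
 *	klp_ops for foo():
 *		fops.func  -> klp_ftrace_handler
 *		func_stack -> [ patch2's klp_func, patch1's klp_func ]
 *
 * klp_ftrace_handler() always picks the first entry, so disabling patch2
 * simply pops its klp_func and instantly re-exposes patch1's version.
 */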
/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);
static struct kobject *klp_root_kobj;
static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}
static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics of
	 * the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}
/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}
static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}
struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};
static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}
static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
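
/*
 * A minimal sketch of the lookup semantics (hypothetical symbol name): for a
 * static function name that appears twice in vmlinux's kallsyms,
 *
 *	unsigned long addr;
 *
 *	ret = klp_find_object_symbol(NULL, "t_show", 2, &addr); // 2nd match
 *	ret = klp_find_object_symbol(NULL, "t_show", 0, &addr); // ambiguous
 *
 * sympos == 0 demands a unique symbol, while a non-zero sympos selects the
 * nth occurrence.
 */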
/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/*
	 * Check if it's in another .o within the patch module. This also
	 * checks that the external symbol is unique.
	 */
	return klp_find_object_symbol(pmod->name, name, 0, addr);
}
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret = 0;
	unsigned long val;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	module_disable_ro(pmod);

	for (reloc = obj->relocs; reloc->name; reloc++) {
		/* discover the address of the referenced symbol */
		if (reloc->external) {
			if (reloc->sympos > 0) {
				pr_err("non-zero sympos for external reloc symbol '%s' is not supported\n",
				       reloc->name);
				ret = -EINVAL;
				goto out;
			}
			ret = klp_find_external_symbol(pmod, reloc->name, &val);
		} else
			ret = klp_find_object_symbol(obj->name, reloc->name,
						     reloc->sympos, &val);
		if (ret)
			goto out;

		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, val, ret);
			goto out;
		}
	}

out:
	module_enable_ro(pmod);
	return ret;
}
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
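
/*
 * For context, a sketch of the arch hook used above: on x86,
 * klp_arch_set_pc() just rewrites the saved instruction pointer, so when
 * ftrace restores the registers, execution resumes in the replacement
 * function rather than the original (cf. arch/x86/include/asm/livepatch.h;
 * other architectures provide their own definition):
 *
 *	static inline void klp_arch_set_pc(struct pt_regs *regs,
 *					   unsigned long ip)
 *	{
 *		regs->ip = ip;
 *	}
 */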
static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}
static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}
static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}

	obj->state = KLP_ENABLED;

	return 0;
}
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}
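
/*
 * Worked example of the stacking rule (hypothetical patch names): if patches
 * P1, P2 and P3 were enabled in that order, they must be disabled in reverse
 * order; calling __klp_disable_patch() on P1 while P2 is still enabled
 * returns -EBUSY.
 */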
/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}
/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
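
/*
 * For example, with a loaded patch module named livepatch_sample (a
 * hypothetical name), writing '0' or '1' to
 * /sys/kernel/livepatch/livepatch_sample/enabled from user space disables
 * or re-enables the patch via enabled_store() below.
 */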
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};
static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};
static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};
static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}
/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}
/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}
static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
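
/*
 * For instance (hypothetical function name): a klp_func patching the unique
 * symbol cmdline_proc_show with old_sympos == 0 gets the sysfs directory
 * /sys/kernel/livepatch/<patch>/<object>/cmdline_proc_show,1 while
 * old_sympos == 2 would name (and select) the second occurrence instead.
 */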
/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}
static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}
/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);
/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
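
/*
 * Illustrative sketch of a consumer (a hypothetical module, modeled on
 * samples/livepatch): a patch module fills in klp_func/klp_object/klp_patch
 * arrays, then registers and enables the patch from its init function:
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,		(NULL name means vmlinux)
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 */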
int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			if (patch->state == KLP_DISABLED)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_enable_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}
void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch->state != KLP_DISABLED) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_disable_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}
static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);