// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "state.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex
 * protection, except within the following functions which carefully avoid
 * the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module still can be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

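/*
 * Illustrative note (not part of the original file): with sympos == 0 the
 * lookup above succeeds only when the symbol is unique within the object.
 * If, say, a symbol appeared twice in the patched module, sympos == 0
 * would fail with -EINVAL (unresolvable ambiguity), while sympos == 2
 * would select the second occurrence reported by kallsyms.
 */
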
static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

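/*
 * Illustrative example (not part of the original file): per the format
 * parsed above, a livepatch symbol that resolves to the unique vmlinux
 * symbol "meminfo_proc_show" would be named
 * ".klp.sym.vmlinux.meminfo_proc_show,0" (sympos 0 requires uniqueness).
 */
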
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}

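/*
 * Illustrative note (not part of the original file; the section name is
 * hypothetical): the sscanf() above reads everything after ".klp.rela."
 * up to the first period, so a section named ".klp.rela.btrfs.text"
 * parses sec_objname as "btrfs" and is skipped unless the patch object
 * currently being initialized is the btrfs module.
 */
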
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */

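/*
 * Illustrative usage (not part of the original file; the patch name
 * "livepatch_sample" is hypothetical). The semantics below match the
 * store and show callbacks that follow:
 *
 *   # disable the patch, or reverse a still-pending transition:
 *   echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *
 *   # check whether a transition is in progress:
 *   cat /sys/kernel/livepatch/livepatch_sample/transition
 *
 *   # force a pending transition to complete (last resort; the patch
 *   # module can no longer be removed afterwards, see patch->forced):
 *   echo 1 > /sys/kernel/livepatch/livepatch_sample/force
 */
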
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow reversing a pending transition in both directions. It might
	 * be necessary to complete the transition without forcing and
	 * breaking the system integrity.
	 *
	 * Do not allow re-enabling a disabled patch.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}

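/*
 * Illustrative note (not part of the original file): a cumulative patch
 * sets .replace = true in its struct klp_patch. The nops generated above
 * then cover every function touched only by older patches, so enabling
 * the new patch atomically reverts those functions to their original
 * code while the new patch's own functions take effect.
 */
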
static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
static void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(), and it must be the last
 * function accessing the livepatch structures when the patch gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch. This work item allows waiting until the interface is destroyed
 * in a separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

void klp_free_patch_async(struct klp_patch *patch)
{
	klp_free_patch_start(patch);
	schedule_work(&patch->free_work);
}

void klp_free_replaced_patches_async(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;
		klp_free_patch_async(old_patch);
	}
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}

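/*
 * Illustrative example (not part of the original file): patching the
 * unique vmlinux symbol "meminfo_proc_show" with old_sympos == 0 creates
 * the directory /sys/kernel/livepatch/<patch>/vmlinux/meminfo_proc_show,1.
 */
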
/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	mutex_lock(&text_mutex);

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		mutex_unlock(&text_mutex);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	mutex_unlock(&text_mutex);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}

	if (!try_module_get(patch->mod))
		return -ENODEV;

	return 0;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations,
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_compatible(patch)) {
		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
		       patch->mod->name);
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	ret = klp_init_patch_early(patch);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

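/*
 * Illustrative caller (not part of the original file): a minimal livepatch
 * module sketch modeled on samples/livepatch/livepatch-sample.c. All names
 * here are hypothetical.
 *
 *	static int livepatch_meminfo_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_puts(m, "this has been live patched\n");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "meminfo_proc_show",
 *			.new_func = livepatch_meminfo_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			// a NULL name means the object is vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 *	MODULE_INFO(livepatch, "Y");
 */
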
/*
 * This function unpatches objects from the replaced livepatches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessed from the ftrace handler.
 * All functions are redirected by the klp_transition_patch. They either
 * use the new code or run the original code because of the special nop
 * function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. Also
 * this is handled transparently by patch->module_put.
 */
void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch;

	klp_for_each_patch(old_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding unnecessary delay in the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	obj->mod = NULL;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);