// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex
 * protection, except within the following functions which carefully avoid
 * the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module can still be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

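/*
 * Illustrative example (hypothetical symbol name): if two static functions
 * named "cleanup_handler" appear in the patched object's kallsyms, then
 * sympos = 1 selects the first occurrence and sympos = 2 the second, while
 * sympos = 0 fails with -EINVAL because the name is ambiguous. For a unique
 * symbol, sympos may be either 0 or 1.
 */
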
static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

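/*
 * For example, a livepatch symbol referring to the (unique) vmlinux symbol
 * "printk" would carry the name:
 *
 *	.klp.sym.vmlinux.printk,0
 *
 * (illustrative; this is the format parsed by the sscanf() above - see
 * Documentation/livepatch/module-elf-format.rst for details)
 */
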
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */

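/*
 * Example usage from userspace (illustrative; <patch> is the name of the
 * loaded livepatch module):
 *
 *	# cat /sys/kernel/livepatch/<patch>/transition
 *	# echo 0 > /sys/kernel/livepatch/<patch>/enabled	(disable/revert)
 *	# echo 1 > /sys/kernel/livepatch/<patch>/force	(force a stuck transition)
 */
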
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow reversing a pending transition in either direction. It may
	 * be necessary in order to complete the transition without forcing
	 * it and thereby breaking system integrity.
	 *
	 * Do not allow re-enabling a disabled patch.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}

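/*
 * For example (hypothetical patches): if an already-installed patch modifies
 * foo() and bar(), and a new 'replace' patch only provides a new foo(), then
 * klp_add_nops() adds a dynamically allocated nop entry for bar() to the new
 * patch. Once the transition to the new patch completes, bar() effectively
 * runs its original code again.
 */
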
static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(), and it has to be the
 * last function accessing the livepatch structures when the patch gets
 * disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch itself. This work item makes it possible to wait until the
 * interface is destroyed in a separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object. If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence.
	 */
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* Parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	mutex_lock(&text_mutex);

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		mutex_unlock(&text_mutex);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	mutex_unlock(&text_mutex);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}

	if (!try_module_get(patch->mod))
		return -ENODEV;

	return 0;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	ret = klp_init_patch_early(patch);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

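/*
 * A minimal caller sketch, modeled on samples/livepatch/livepatch-sample.c
 * (the function names and the patched symbol below are illustrative; an
 * object whose name is left NULL targets vmlinux):
 *
 *	#include <linux/module.h>
 *	#include <linux/kernel.h>
 *	#include <linux/livepatch.h>
 *	#include <linux/seq_file.h>
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *
 *	module_init(livepatch_init);
 *	MODULE_LICENSE("GPL");
 *	MODULE_INFO(livepatch, "Y");
 */
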
/*
 * This function removes replaced patches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessible. All functions are redirected
 * by the klp_transition_patch. They either use the new code or run the
 * original code because of the special nop function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patch modules around on the
 * system; this is handled transparently because module_put() is skipped
 * for forced patches, see klp_free_patch_finish().
 */
void klp_discard_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
		klp_free_patch_start(old_patch);
		schedule_work(&old_patch->free_work);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding unnecessary delay by the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	obj->mod = NULL;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);