// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);
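
/*
 * Find the klp_ops struct that traces @old_func.  Each patched function
 * has exactly one klp_ops, shared by all patches stacked on that
 * function; every func on a given func_stack therefore has the same
 * old_func, so checking the first entry is sufficient.
 */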
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}
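
/*
 * The ftrace handler installed on every patched function.  It redirects
 * execution to the newest patched version on ops->func_stack by changing
 * the saved instruction pointer via klp_arch_set_pc().
 */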
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
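
/*
 * (powerpc, for example, overrides klp_get_ftrace_location() in its
 * asm/livepatch.h to search near the function entry with
 * ftrace_location_range(), because the mcount call is not at the very
 * first instruction there.)
 */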

/*
 * Remove @func from its ops->func_stack.  If it is the last patch on the
 * stack, also unregister the ftrace handler and free the klp_ops.
 */
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}
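
/*
 * Register @func on the stack of patches for its old function.  The first
 * patch of a function allocates a klp_ops and registers the shared ftrace
 * handler; later patches are simply pushed onto the existing func_stack.
 */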
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		/*
		 * The handler needs pt_regs (SAVE_REGS) in order to change
		 * the instruction pointer (IPMODIFY); the ops struct itself
		 * is dynamically allocated (DYNAMIC).
		 */
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
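
/*
 * Unpatch every patched function in @obj.  With @nops_only, only the
 * dynamically added NOP functions are unpatched; the object then stays
 * marked patched unless it is itself dynamic.
 */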
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}
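
/*
 * Patch every function in @obj.  On failure, roll back the functions
 * that were already patched.  Presumably called with klp_mutex held by
 * the livepatch core.
 */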
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}
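
/*
 * Unpatch only the dynamically allocated NOP functions.  These are added
 * by atomic-replace patches to revert functions that the new cumulative
 * patch no longer modifies.
 */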
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}