// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
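/*
 * klp_ops holds one entry per patched function address. Each entry pairs
 * the registered ftrace_ops with a func_stack of every klp_func (possibly
 * from several live patches) that targets the same old function; the entry
 * at the head of the stack is the one currently in effect.
 */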
static LIST_HEAD(klp_ops);
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}
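/*
 * This handler is where the redirection happens: ftrace calls it on entry
 * to a patched function, and it moves the saved instruction pointer (via
 * klp_arch_set_pc()) so that execution resumes in the selected new_func.
 */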
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * guarantees that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(fregs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}
/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
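/*
 * As an example of the "more complicated" case above: powerpc64 supplies
 * its own klp_get_ftrace_location (see the arch's asm/livepatch.h), since
 * with -mprofile-kernel the ftrace site sits a few instructions past the
 * function's entry point rather than at faddr itself.
 */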
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}
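/*
 * Redirect one function: look up (or allocate and register) the klp_ops
 * for its ftrace location and push the new klp_func onto the func_stack,
 * where the handler above will start picking it. The inverse operation is
 * klp_unpatch_func() above. Callers are expected to hold klp_mutex.
 */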
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
				  FTRACE_OPS_FL_SAVE_REGS |
#endif
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
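/*
 * With nops_only set, only the dynamically added NOP entries are removed;
 * the atomic-replace path uses this to discard NOPs it no longer needs
 * once a transition has finished, leaving real patched functions alone.
 */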
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}
void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}
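/*
 * Patch every function of an object. On the first failure, roll back the
 * functions already patched so the object is left fully unpatched.
 */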
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}
void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}
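/*
 * For context, a sketch of how this machinery is reached from a livepatch
 * module (modelled on samples/livepatch/livepatch-sample.c; illustrative
 * only, not part of this file). klp_enable_patch() eventually invokes
 * klp_patch_object() above for each loaded object:
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{ .old_name = "cmdline_proc_show",
 *		  .new_func = livepatch_cmdline_proc_show, },
 *		{ }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{ .funcs = funcs, },	(a NULL .name means vmlinux)
 *		{ }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 */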