/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
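
/*
 * Orientation: klp_ops is a list with one entry per patched old function.
 * Each struct klp_ops (see patch.h) bundles the ftrace_ops registered for
 * that function with the stack of klp_func entries from every patch that
 * modifies it.
 */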
static LIST_HEAD(klp_ops);

struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}
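
/*
 * Illustration (hypothetical scenario): if patch1 and patch2 both replace
 * meminfo_proc_show(), a single klp_ops is shared and its func_stack holds
 * both klp_funcs, newest first:
 *
 *	klp_ops for meminfo_proc_show
 *	  func_stack: patch2's klp_func -> patch1's klp_func
 *
 * klp_find_ops() therefore only needs to look at the top of each stack,
 * since every klp_func on a given stack shares the same old_func.
 */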

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}
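
/*
 * Ordering recap for the handler above: the first smp_rmb() pairs with a
 * write barrier in the enable path (__klp_enable_patch()) so that once a
 * new func is visible on ops->func_stack, its func->transition value is
 * visible too; the second pairs with klp_init_transition(), which
 * initializes every task's patch_state before setting func->transition.
 */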

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
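
/*
 * Sketch of an architecture override (based on powerpc's version; see
 * arch/powerpc/include/asm/livepatch.h for the authoritative definition):
 *
 *	#define klp_get_ftrace_location klp_get_ftrace_location
 *	static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
 *	{
 *		// With -mprofile-kernel the ftrace location is within the
 *		// first 16 bytes of the function, not at faddr itself.
 *		return ftrace_location_range(faddr, faddr + 16);
 *	}
 */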

static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
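
/*
 * Note on the ftrace_set_filter_ip() calls above: the signature is
 * ftrace_set_filter_ip(ops, ip, remove, reset), so (..., 0, 0) adds
 * ftrace_loc to the ops' filter before registration, and (..., 1, 0)
 * removes it again on the error and unpatch paths.
 */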

static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}
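
/*
 * Design note: object patching is all-or-nothing.  If klp_patch_func()
 * fails partway through, klp_patch_object() unpatches the functions it
 * already patched before returning the error.
 */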

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}