kernel/livepatch/patch.c
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);
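
/*
 * Each entry on klp_ops owns the single ftrace_ops shared by every enabled
 * replacement of one old function address.  The currently active replacement
 * is the klp_func at the front of ops->func_stack, which is why stacking
 * patches on top of each other and reverting them amounts to pushing and
 * popping entries on that list.  Because every func on a given stack shares
 * the same old_addr, klp_find_ops() below only needs to compare against the
 * first entry.  (See the struct klp_ops kerneldoc in patch.h.)
 */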
struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_sched() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

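	/*
	 * Redirect execution: the handler runs from the ftrace trampoline
	 * with the saved register state, and klp_arch_set_pc() rewrites the
	 * saved instruction pointer (regs->ip on x86), so when the trampoline
	 * restores registers, control resumes in the replacement function
	 * instead of the original.
	 */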
	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
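
/*
 * powerpc64 (with -mprofile-kernel) is one such architecture: its override
 * in arch/powerpc/include/asm/livepatch.h looks up the ftrace call site
 * within the first few bytes after the function entry rather than at the
 * entry address itself.
 */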
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		/*
		 * This is the last replacement for this function, so tear
		 * down the shared ftrace_ops entirely.  Unregistering waits
		 * for in-flight handler invocations to finish, which makes
		 * the kfree() below safe.
		 */
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		/* Other patches still replace this function; just pop ours. */
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		/*
		 * First replacement for this function: create the shared
		 * ftrace_ops and set it up before registering it.
		 */
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		/*
		 * SAVE_REGS is required so the handler gets a pt_regs it can
		 * modify; IPMODIFY declares that intent to ftrace so that
		 * conflicting ip-modifying users of the same function are
		 * rejected at registration time.
		 */
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		/* The function is already patched; stack ours on top. */
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

void klp_unpatch_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->patched)
			klp_unpatch_func(func);

	obj->patched = false;
}

int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}

	obj->patched = true;

	return 0;
}
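
/*
 * The object-level entry points above are driven from core.c:
 * klp_patch_object() runs when a patch is enabled, and again from
 * klp_module_coming() when a module targeted by an enabled patch is loaded.
 * Patching an object is all-or-nothing: if any function fails to patch, the
 * whole object is rolled back.
 */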
void klp_unpatch_objects(struct klp_patch *patch)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_unpatch_object(obj);
}
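
/*
 * For context, a livepatch module only fills in the klp_func/klp_object/
 * klp_patch structures that the functions in this file consume.  A minimal
 * sketch, loosely following samples/livepatch/livepatch-sample.c (the
 * replacement function name here is illustrative):
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,	// name being NULL means vmlinux
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 * The module then calls klp_register_patch(&patch) and klp_enable_patch()
 * from its init function, which eventually lands in klp_patch_object() and
 * klp_patch_func() above.
 */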