kernel/livepatch/transition.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

static unsigned int klp_signals_cnt;
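
/*
 * Note on the variables above: klp_transition_patch points to the patch
 * currently being applied or reverted; klp_target_state is the per-task
 * state (KLP_PATCHED or KLP_UNPATCHED) that every task must reach before
 * the transition completes; klp_signals_cnt counts retry attempts so that
 * stuck tasks are signaled only every SIGNALS_TIMEOUT attempts.
 */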

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
        mutex_lock(&klp_mutex);

        if (klp_transition_patch)
                klp_try_complete_transition();

        mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu().  This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit().  We cannot rely on the RCU infrastructure
 * to do the synchronization; instead, we hard force the scheduler
 * synchronization.
 *
 * This approach allows RCU functions to be used safely for manipulating
 * func_stack.
 */
static void klp_synchronize_transition(void)
{
        schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
        struct klp_object *obj;
        struct klp_func *func;
        struct task_struct *g, *task;
        unsigned int cpu;

        pr_debug("'%s': completing %s transition\n",
                 klp_transition_patch->mod->name,
                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
                klp_discard_replaced_patches(klp_transition_patch);
                klp_discard_nops(klp_transition_patch);
        }

        if (klp_target_state == KLP_UNPATCHED) {
                /*
                 * All tasks have transitioned to KLP_UNPATCHED so we can now
                 * remove the new functions from the func_stack.
                 */
                klp_unpatch_objects(klp_transition_patch);

                /*
                 * Make sure klp_ftrace_handler() can no longer see functions
                 * from this patch on the ops->func_stack.  Otherwise, after
                 * func->transition gets cleared, the handler may choose a
                 * removed function.
                 */
                klp_synchronize_transition();
        }

        klp_for_each_object(klp_transition_patch, obj)
                klp_for_each_func(obj, func)
                        func->transition = false;

        /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
        if (klp_target_state == KLP_PATCHED)
                klp_synchronize_transition();
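
        /*
         * The transition is over; reset every task's state to KLP_UNDEFINED
         * so a future transition can start from a known baseline.
         */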
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_UNDEFINED;
        }
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_UNDEFINED;
        }

        klp_for_each_object(klp_transition_patch, obj) {
                if (!klp_is_object_loaded(obj))
                        continue;
                if (klp_target_state == KLP_PATCHED)
                        klp_post_patch_callback(obj);
                else if (klp_target_state == KLP_UNPATCHED)
                        klp_post_unpatch_callback(obj);
        }

        pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        klp_target_state = KLP_UNDEFINED;
        klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
        if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
                return;

        pr_debug("'%s': canceling patching transition, going to unpatch\n",
                 klp_transition_patch->mod->name);

        klp_target_state = KLP_UNPATCHED;
        klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
        /*
         * A variant of synchronize_rcu() is used to allow patching functions
         * where RCU is not watching, see klp_synchronize_transition().
         */
        preempt_disable_notrace();

        /*
         * This test_and_clear_tsk_thread_flag() call also serves as a read
         * barrier (smp_rmb) for two cases:
         *
         * 1) Enforce the order of the TIF_PATCH_PENDING read and the
         *    klp_target_state read.  The corresponding write barrier is in
         *    klp_init_transition().
         *
         * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
         *    of func->transition, if klp_ftrace_handler() is called later on
         *    the same CPU.  See __klp_disable_patch().
         */
        if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
                task->patch_state = READ_ONCE(klp_target_state);

        preempt_enable_notrace();
}
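
/*
 * Note: this runs for 'current' as tasks exit the kernel and from the idle
 * loop switch point (see the comments in klp_start_transition()), and it is
 * also used by klp_force_transition() below.
 */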

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
                                unsigned int nr_entries)
{
        unsigned long func_addr, func_size, address;
        struct klp_ops *ops;
        int i;

        for (i = 0; i < nr_entries; i++) {
                address = entries[i];

                if (klp_target_state == KLP_UNPATCHED) {
                        /*
                         * Check for the to-be-unpatched function
                         * (the func itself).
                         */
                        func_addr = (unsigned long)func->new_func;
                        func_size = func->new_size;
                } else {
                        /*
                         * Check for the to-be-patched function
                         * (the previous func).
                         */
                        ops = klp_find_ops(func->old_func);

                        if (list_is_singular(&ops->func_stack)) {
                                /* original function */
                                func_addr = (unsigned long)func->old_func;
                                func_size = func->old_size;
                        } else {
                                /* previously patched function */
                                struct klp_func *prev;
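
                                /*
                                 * ops->func_stack is ordered newest first, so
                                 * the entry after 'func' is the previously
                                 * active function.
                                 */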
                                prev = list_next_entry(func, stack_node);
                                func_addr = (unsigned long)prev->new_func;
                                func_size = prev->new_size;
                        }
                }

                if (address >= func_addr && address < func_addr + func_size)
                        return -EAGAIN;
        }

        return 0;
}
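
/*
 * Note on klp_check_stack() below: its 'entries' buffer is static and
 * therefore shared; this is safe because all transition work runs under
 * klp_mutex.
 */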

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
        static unsigned long entries[MAX_STACK_ENTRIES];
        struct klp_object *obj;
        struct klp_func *func;
        int ret, nr_entries;

        ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
        WARN_ON_ONCE(ret == -ENOSYS);
        if (ret < 0) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d has an unreliable stack\n",
                         __func__, task->comm, task->pid);
                return ret;
        }
        nr_entries = ret;

        klp_for_each_object(klp_transition_patch, obj) {
                if (!obj->patched)
                        continue;
                klp_for_each_func(obj, func) {
                        ret = klp_check_stack_func(func, entries, nr_entries);
                        if (ret) {
                                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                                         "%s: %s:%d is sleeping on function %s\n",
                                         __func__, task->comm, task->pid,
                                         func->old_name);
                                return ret;
                        }
                }
        }

        return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
        struct rq *rq;
        struct rq_flags flags;
        int ret;
        bool success = false;
        char err_buf[STACK_ERR_BUF_SIZE];

        err_buf[0] = '\0';

        /* check if this task has already switched over */
        if (task->patch_state == klp_target_state)
                return true;

        /*
         * Now try to check the stack for any to-be-patched or to-be-unpatched
         * functions.  If all goes well, switch the task to the target patch
         * state.
         */
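        /*
         * Holding the task's rq lock also guarantees the task cannot be
         * scheduled in while its stack is examined below.
         */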
        rq = task_rq_lock(task, &flags);

        if (task_running(rq, task) && task != current) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d is running\n", __func__, task->comm,
                         task->pid);
                goto done;
        }

        ret = klp_check_stack(task, err_buf);
        if (ret)
                goto done;

        success = true;

        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        task->patch_state = klp_target_state;

done:
        task_rq_unlock(rq, task, &flags);

        /*
         * Due to console deadlock issues, pr_debug() can't be used while
         * holding the task rq lock.  Instead we have to use a temporary buffer
         * and print the debug message after releasing the lock.
         */
        if (err_buf[0] != '\0')
                pr_debug("%s", err_buf);

        return success;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
        struct task_struct *g, *task;

        if (klp_signals_cnt == SIGNALS_TIMEOUT)
                pr_notice("signaling remaining tasks\n");

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                if (!klp_patch_pending(task))
                        continue;

                /*
                 * There is a small race here.  We could see TIF_PATCH_PENDING
                 * set and decide to wake up a kthread or send a fake signal.
                 * Meanwhile the task could migrate itself and the action
                 * would be meaningless.  It is not serious though.
                 */
                if (task->flags & PF_KTHREAD) {
                        /*
                         * Wake up a kthread which sleeps interruptibly and
                         * still has not been migrated.
                         */
                        wake_up_state(task, TASK_INTERRUPTIBLE);
                } else {
                        /*
                         * Send fake signal to all non-kthread tasks which are
                         * still not migrated.
                         */
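                        /*
                         * signal_wake_up() only sets TIF_SIGPENDING and wakes
                         * the task; no signal is actually queued.  The task
                         * then leaves the kernel via the signal path, where
                         * the pending patch state gets applied.
                         */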
                        spin_lock_irq(&task->sighand->siglock);
                        signal_wake_up(task, 0);
                        spin_unlock_irq(&task->sighand->siglock);
                }
        }
        read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;
        struct klp_patch *patch;
        bool complete = true;

        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

        /*
         * Try to switch the tasks to the target patch state by walking their
         * stacks and looking for any to-be-patched or to-be-unpatched
         * functions.  If such functions are found on a stack, or if the stack
         * is deemed unreliable, the task can't be switched yet.
         *
         * Usually this will transition most (or all) of the tasks on a system
         * unless the patch includes changes to a very common function.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (!klp_try_switch_task(task))
                        complete = false;
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        get_online_cpus();
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (cpu_online(cpu)) {
                        if (!klp_try_switch_task(task))
                                complete = false;
                } else if (task->patch_state != klp_target_state) {
                        /* offline idle tasks can be switched immediately */
                        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
                        task->patch_state = klp_target_state;
                }
        }
        put_online_cpus();

        if (!complete) {
                if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
                        klp_send_signals();
                klp_signals_cnt++;

                /*
                 * Some tasks weren't able to be switched over.  Try again
                 * later and/or wait for other methods like kernel exit
                 * switching.
                 */
                schedule_delayed_work(&klp_transition_work,
                                      round_jiffies_relative(HZ));
                return;
        }

        /* we're done, now cleanup the data structures */
        patch = klp_transition_patch;
        klp_complete_transition();

        /*
         * It would make more sense to free the patch in
         * klp_complete_transition() but it is called also
         * from klp_cancel_transition().
         */
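        /*
         * Note: the actual freeing is deferred via free_work because the
         * final cleanup must run outside klp_mutex, which is held here.
         */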
        if (!patch->enabled) {
                klp_free_patch_start(patch);
                schedule_work(&patch->free_work);
        }
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
        struct task_struct *g, *task;
        unsigned int cpu;

        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

        pr_notice("'%s': starting %s transition\n",
                  klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        /*
         * Mark all normal tasks as needing a patch state update.  They'll
         * switch either in klp_try_complete_transition() or as they exit the
         * kernel.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        /*
         * Mark all idle tasks as needing a patch state update.  They'll switch
         * either in klp_try_complete_transition() or at the idle loop switch
         * point.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        }
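
        /* Begin a fresh retry/signal cycle for this transition. */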
        klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
        struct task_struct *g, *task;
        unsigned int cpu;
        struct klp_object *obj;
        struct klp_func *func;
        int initial_state = !state;

        WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

        klp_transition_patch = patch;

        /*
         * Set the global target patch state which tasks will switch to.  This
         * has no effect until the TIF_PATCH_PENDING flags get set later.
         */
        klp_target_state = state;

        pr_debug("'%s': initializing %s transition\n", patch->mod->name,
                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        /*
         * Initialize all tasks to the initial patch state to prepare them for
         * switching to the target state.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
                task->patch_state = initial_state;
        }
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
                task->patch_state = initial_state;
        }

        /*
         * Enforce the order of the task->patch_state initializations and the
         * func->transition updates to ensure that klp_ftrace_handler() doesn't
         * see a func in transition with a task->patch_state of KLP_UNDEFINED.
         *
         * Also enforce the order of the klp_target_state write and future
         * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
         * set a task->patch_state to KLP_UNDEFINED.
         */
        smp_wmb();

        /*
         * Set the func transition states so klp_ftrace_handler() will know to
         * switch to the transition logic.
         *
         * When patching, the funcs aren't yet in the func_stack and will be
         * made visible to the ftrace handler shortly by the calls to
         * klp_patch_object().
         *
         * When unpatching, the funcs are already in the func_stack and so are
         * already visible to the ftrace handler.
         */
        klp_for_each_object(patch, obj)
                klp_for_each_func(obj, func)
                        func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;

        pr_debug("'%s': reversing transition from %s\n",
                 klp_transition_patch->mod->name,
                 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
                                                   "unpatching to patching");

        klp_transition_patch->enabled = !klp_transition_patch->enabled;

        klp_target_state = !klp_target_state;

        /*
         * Clear all TIF_PATCH_PENDING flags to prevent races caused by
         * klp_update_patch_state() running in parallel with
         * klp_start_transition().
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu)
                clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

        /* Let any remaining calls to klp_update_patch_state() complete */
        klp_synchronize_transition();

        klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
        child->patch_state = current->patch_state;

        /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}
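
/*
 * Note: inheriting the parent's patch_state keeps the child consistent
 * from its first instruction, and the copied TIF_PATCH_PENDING flag lets
 * an in-progress transition migrate the child like any other task.
 */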

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one who can execute
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
        struct klp_patch *patch;
        struct task_struct *g, *task;
        unsigned int cpu;

        pr_warn("forcing remaining tasks to the patched state\n");

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                klp_update_patch_state(task);
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu)
                klp_update_patch_state(idle_task(cpu));
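
        /*
         * Mark every patch as forced: the consistency model can no longer
         * vouch for these tasks, so the involved patch modules must not be
         * removed.
         */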
        klp_for_each_patch(patch)
                patch->forced = true;
}