// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include <linux/static_call.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#define MAX_STACK_ENTRIES	100
static DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries);

#define STACK_ERR_BUF_SIZE	128

#define SIGNALS_TIMEOUT	15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_TRANSITION_IDLE;

static unsigned int klp_signals_cnt;
/*
 * When a livepatch is in progress, enable klp stack checking in
 * cond_resched().  This helps CPU-bound kthreads get patched.
 */
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)

#define klp_cond_resched_enable() sched_dynamic_klp_enable()
#define klp_cond_resched_disable() sched_dynamic_klp_disable()

#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */

DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
EXPORT_SYMBOL(klp_sched_try_switch_key);

#define klp_cond_resched_enable() static_branch_enable(&klp_sched_try_switch_key)
#define klp_cond_resched_disable() static_branch_disable(&klp_sched_try_switch_key)

#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
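
/*
 * Note: klp_cond_resched_enable() is called from klp_start_transition() and
 * klp_cond_resched_disable() from klp_try_complete_transition() below, so the
 * extra stack-checking work in cond_resched() only runs while a transition is
 * actually pending.
 */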
/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We allow to patch also functions where RCU is not watching,
 * e.g. before user_exit(). We can not rely on the RCU infrastructure
 * to do the synchronization. Instead hard force the sched synchronization.
 *
 * This approach allows to use RCU functions for manipulating func_stack
 * safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}
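
/*
 * schedule_on_each_cpu() waits for the empty klp_sync() work to run on every
 * CPU, which guarantees that every CPU has gone through the scheduler at
 * least once, even in code where RCU is not watching.
 */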
/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_TRANSITION_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_TRANSITION_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_TRANSITION_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_TRANSITION_IDLE state */
	if (klp_target_state == KLP_TRANSITION_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_TRANSITION_IDLE;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_TRANSITION_IDLE;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_TRANSITION_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_TRANSITION_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_TRANSITION_IDLE;
	klp_transition_patch = NULL;
}
/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_TRANSITION_UNPATCHED;
	klp_complete_transition();
}
/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barriers are in
	 *    klp_init_transition() and klp_reverse_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}
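
/*
 * Note: besides klp_force_transition() below, this lazy per-task switch is
 * used on the kernel-exit path (see the "switching tasks at kernel exit"
 * comment in klp_try_switch_task()); patch_state is only updated if
 * TIF_PATCH_PENDING was still set.
 */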
/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	if (klp_target_state == KLP_TRANSITION_UNPATCHED) {
		 /*
		  * Check for the to-be-unpatched function
		  * (the func itself).
		  */
		func_addr = (unsigned long)func->new_func;
		func_size = func->new_size;
	} else {
		/*
		 * Check for the to-be-patched function
		 * (the previous func).
		 */
		ops = klp_find_ops(func->old_func);

		if (list_is_singular(&ops->func_stack)) {
			/* original function */
			func_addr = (unsigned long)func->old_func;
			func_size = func->old_size;
		} else {
			/* previously patched function */
			struct klp_func *prev;

			prev = list_next_entry(func, stack_node);
			func_addr = (unsigned long)prev->new_func;
			func_size = prev->new_size;
		}
	}

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}
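
/*
 * A return value of -EAGAIN above means the saved stack still references the
 * address range [func_addr, func_addr + func_size) of a conflicting function,
 * so the task cannot be switched yet.
 */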
/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, const char **oldname)
{
	unsigned long *entries = this_cpu_ptr(klp_stack_entries);
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	/* Protect 'klp_stack_entries' */
	lockdep_assert_preemption_disabled();

	ret = stack_trace_save_tsk_reliable(task, entries, MAX_STACK_ENTRIES);
	if (ret < 0)
		return -EINVAL;
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				*oldname = func->old_name;
				return -EADDRINUSE;
			}
		}
	}

	return 0;
}
static int klp_check_and_switch_task(struct task_struct *task, void *arg)
{
	int ret;

	if (task_curr(task) && task != current)
		return -EBUSY;

	ret = klp_check_stack(task, arg);
	if (ret)
		return ret;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

	return 0;
}
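
/*
 * klp_check_and_switch_task() is either run directly on 'current' or via
 * task_call_func(), which pins the task so it cannot start running while its
 * stack is being checked and its patch_state updated.
 */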
/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	const char *old_name;
	int ret;

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	if (task == current)
		ret = klp_check_and_switch_task(current, &old_name);
	else
		ret = task_call_func(task, klp_check_and_switch_task, &old_name);

	switch (ret) {
	case 0:		/* success */
		break;

	case -EBUSY:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is running\n",
			 __func__, task->comm, task->pid);
		break;
	case -EINVAL:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		break;
	case -EADDRINUSE: /* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is sleeping on function %s\n",
			 __func__, task->comm, task->pid, old_name);
		break;

	default:
		pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
			 __func__, ret, task->comm, task->pid);
		break;
	}

	return !ret;
}
void __klp_sched_try_switch(void)
{
	if (likely(!klp_patch_pending(current)))
		return;

	/*
	 * This function is called from cond_resched() which is called in many
	 * places throughout the kernel.  Using the klp_mutex here might
	 * deadlock.
	 *
	 * Instead, disable preemption to prevent racing with other callers of
	 * klp_try_switch_task().  Thanks to task_call_func() they won't be
	 * able to switch this task while it's running.
	 */
	preempt_disable();

	/*
	 * Make sure current didn't get patched between the above check and
	 * preempt_disable().
	 */
	if (unlikely(!klp_patch_pending(current)))
		goto out;

	/*
	 * Enforce the order of the TIF_PATCH_PENDING read above and the
	 * klp_target_state read in klp_try_switch_task().  The corresponding
	 * write barriers are in klp_init_transition() and
	 * klp_reverse_transition().
	 */
	smp_rmb();

	klp_try_switch_task(current);

out:
	preempt_enable();
}
EXPORT_SYMBOL(__klp_sched_try_switch);
/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here. We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless. It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptedly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send fake signal to all non-kthread tasks which are
			 * still not migrated.
			 */
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}
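
/*
 * The "fake signal" above does not deliver a real signal: set_notify_signal()
 * just kicks the task out of interruptible sleep and through the kernel-exit
 * path, where its pending patch state gets resolved.
 */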
/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task)) {
				complete = false;
				/* Make idle task go through the main loop. */
				wake_up_if_idle(cpu);
			}
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}

	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* Done!  Now cleanup the data structures. */
	klp_cond_resched_disable();
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition() but it is called also
	 * from klp_cancel_transition().
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}
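
/*
 * While tasks remain unswitched, the transition work above retries roughly
 * once per second (round_jiffies_relative(HZ)) and klp_send_signals() is
 * invoked every SIGNALS_TIMEOUT (15) retries.
 */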
/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_cond_resched_enable();

	klp_signals_cnt = 0;
}
/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_IDLE);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_TRANSITION_IDLE.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and
	 * __klp_sched_try_switch() don't set a task->patch_state to
	 * KLP_TRANSITION_IDLE.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}
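
/*
 * The '!state' trick above relies on KLP_TRANSITION_UNPATCHED and
 * KLP_TRANSITION_PATCHED being 0 and 1, so the initial per-task state is
 * simply the opposite of the target state.
 */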
/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_TRANSITION_PATCHED ? "patching to unpatching" :
							      "unpatching to patching");

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() or __klp_sched_try_switch() running in
	 * parallel with the reverse transition.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/*
	 * Make sure all existing invocations of klp_update_patch_state() and
	 * __klp_sched_try_switch() see the cleared TIF_PATCH_PENDING before
	 * starting the reverse transition.
	 */
	klp_synchronize_transition();

	/*
	 * All patching has stopped, now re-initialize the global variables to
	 * prepare for the reverse transition.
	 */
	klp_transition_patch->enabled = !klp_transition_patch->enabled;
	klp_target_state = !klp_target_state;

	/*
	 * Enforce the order of the klp_target_state write and the
	 * TIF_PATCH_PENDING writes in klp_start_transition() to ensure
	 * klp_update_patch_state() and __klp_sched_try_switch() don't set
	 * task->patch_state to the wrong value.
	 */
	smp_wmb();

	klp_start_transition();
}
/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	/*
	 * The parent process may have gone through a KLP transition since
	 * the thread flag was copied in setup_thread_stack earlier. Bring
	 * the task flag up to date with the parent here.
	 *
	 * The operation is serialized against all klp_*_transition()
	 * operations by the tasklist_lock. The only exceptions are
	 * klp_update_patch_state(current) and __klp_sched_try_switch(), but we
	 * cannot race with them because we are current.
	 */
	if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
		set_tsk_thread_flag(child, TIF_PATCH_PENDING);
	else
		clear_tsk_thread_flag(child, TIF_PATCH_PENDING);

	child->patch_state = current->patch_state;
}
/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'. This is not the case here and the consistency model could be
 * broken. Administrator, who is the only one to execute the
 * klp_force_transitions(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	/* Set forced flag for patches being removed. */
	if (klp_target_state == KLP_TRANSITION_UNPATCHED)
		klp_transition_patch->forced = true;
	else if (klp_transition_patch->replace) {
		klp_for_each_patch(patch) {
			if (patch != klp_transition_patch)
				patch->forced = true;
		}
	}
}