/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>

extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

static struct rcu_head *early_probes;
static bool ok_to_free_tracepoints;

/*
 * Note about RCU:
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[0];
};
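
/*
 * probes[0] above is the pre-C99 flexible-array idiom: allocate_probes()
 * below over-allocates so that a NULL-terminated array of tracepoint_func
 * entries follows the rcu_head in a single allocation, and returns a
 * pointer to that array rather than to the enclosing tp_probes.
 */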

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
			+ sizeof(struct tp_probes), GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void srcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}

static __init int release_early_probes(void)
{
	struct rcu_head *tmp;

	ok_to_free_tracepoints = true;

	while (early_probes) {
		tmp = early_probes;
		early_probes = tmp->next;
		call_rcu_sched(tmp, rcu_free_old_probes);
	}

	return 0;
}

/* SRCU is initialized at core_initcall */
postcore_initcall(release_early_probes);

static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * We can't free probes if SRCU is not initialized yet.
		 * Postpone the freeing till after SRCU is initialized.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Tracepoint probes are protected by both sched RCU and SRCU,
		 * by calling the SRCU callback in the sched RCU callback we
		 * cover both cases. So let us chain the SRCU and sched RCU
		 * callbacks to wait for both grace periods.
		 */
		call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
	}
}

static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}

static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int nr_probes = 0;
	int pos = -1;

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			/* Insert before probes of lower priority */
			if (pos < 0 && old[nr_probes].prio < prio)
				pos = nr_probes;
			if (old[nr_probes].func == tp_func->func &&
			    old[nr_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		if (pos < 0) {
			pos = nr_probes;
			memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
		} else {
			/* Copy higher priority probes ahead of the new probe */
			memcpy(new, old, pos * sizeof(struct tracepoint_func));
			/* Copy the rest after it. */
			memcpy(new + pos + 1, old + pos,
			       (nr_probes - pos) * sizeof(struct tracepoint_func));
		}
	} else
		pos = 0;
	new[pos] = *tp_func;
	new[nr_probes + 1].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}

static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if (old[nr_probes].func == tp_func->func &&
			    old[nr_probes].data == tp_func->data)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		for (i = 0; old[i].func; i++)
			if (old[i].func != tp_func->func
					|| old[i].data != tp_func->data)
				new[j++] = old[i];
		new[nr_probes - nr_del].func = NULL;
		*funcs = new;
	}
	debug_print_probes(*funcs);
	return old;
}

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has an smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it.  This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	rcu_assign_pointer(tp->funcs, tp_funcs);
	if (!static_key_enabled(&tp->key))
		static_key_slow_inc(&tp->key);
	release_probes(old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This is
 * ensured by the preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	if (!tp_funcs) {
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		if (static_key_enabled(&tp->key))
			static_key_slow_dec(&tp->key);
	}
	rcu_assign_pointer(tp->funcs, tp_funcs);
	release_probes(old);
	return 0;
}

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data,
					      TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), priv);
}

#ifdef CONFIG_MODULES
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
			       (1 << TAINT_UNSIGNED_MODULE));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}

static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, and unsigned GPL modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;
	mutex_lock(&tracepoint_module_list_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->mod = mod;
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}

static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

static int tracepoint_module_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return ret;
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}

void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif