kernel/trace/fgraph.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/bits.h>
#include <linux/jump_label.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/static_call.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"
#include "trace.h"

/*
 * FGRAPH_FRAME_SIZE:	Size in bytes of the meta data on the shadow stack
 * FGRAPH_FRAME_OFFSET:	Size in long words of the meta data frame
 */
#define FGRAPH_FRAME_SIZE	sizeof(struct ftrace_ret_stack)
#define FGRAPH_FRAME_OFFSET	DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long))

/*
 * On entry to a function (via function_graph_enter()), a new fgraph frame
 * (ftrace_ret_stack) is pushed onto the stack as well as a word that
 * holds a bitmask and a type (called "bitmap"). The bitmap is defined as:
 *
 * bits:  0 -  9	offset in words from the previous ftrace_ret_stack
 *
 * bits: 10 - 11	Type of storage
 *			  0 - reserved
 *			  1 - bitmap of fgraph_array index
 *			  2 - reserved data
 *
 * For type with "bitmap of fgraph_array index" (FGRAPH_TYPE_BITMAP):
 *  bits: 12 - 27	The bitmap of fgraph_ops fgraph_array index
 *			That is, it's a bitmask of 0-15 (16 bits)
 *			where if a corresponding ops in the fgraph_array[]
 *			expects a callback from the return of the function,
 *			its corresponding bit will be set.
 *
 * The top of the ret_stack (when not empty) will always have a reference
 * word that points to the last fgraph frame that was saved.
 *
 * For reserved data (FGRAPH_TYPE_DATA):
 *  bits: 12 - 16	The size in words that is stored
 *  bits: 17 - 20	The index of fgraph_array, which shows whose data is stored
 *
 * That is, at the end of function_graph_enter(), if the first and fourth
 * fgraph_ops on the fgraph_array[] (index 0 and 3) need their retfunc called
 * on the return of the function being traced, and the fourth fgraph_ops
 * stored two words of data, this is what will be on the task's shadow
 * ret_stack: (the stack grows upward)
 *
 *  ret_stack[SHADOW_STACK_OFFSET]
 * | SHADOW_STACK_TASK_VARS(ret_stack)[15]      |
 * ...
 * | SHADOW_STACK_TASK_VARS(ret_stack)[0]       |
 *  ret_stack[SHADOW_STACK_MAX_OFFSET]
 * ...
 * |                                            | <- task->curr_ret_stack
 * +--------------------------------------------+
 * | (3 << 12) | (3 << 10) | FGRAPH_FRAME_OFFSET|
 * | *or put another way*                       |
 * | (3 << FGRAPH_DATA_INDEX_SHIFT)| \          | This is for fgraph_ops[3].
 * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \          | The data size is 2 words.
 * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)| \ |
 * | (offset2:FGRAPH_FRAME_OFFSET+3)            | <- the offset2 is from here
 * +--------------------------------------------+ ( It is 4 words from the ret_stack)
 * |            STORED DATA WORD 2              |
 * |            STORED DATA WORD 1              |
 * +--------------------------------------------+
 * | (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET|
 * | *or put another way*                       |
 * | (BIT(3)|BIT(0)) << FGRAPH_INDEX_SHIFT | \  |
 * | FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT| \ |
 * | (offset1:FGRAPH_FRAME_OFFSET)              | <- the offset1 is from here
 * +--------------------------------------------+
 * | struct ftrace_ret_stack                    |
 * |   (stores the saved ret pointer)           | <- the offset points here
 * +--------------------------------------------+
 * |     (X) | (N)                              | ( N words away from
 * |                                            |   previous ret_stack)
 * ...
 *  ret_stack[0]
 *
 * If a backtrace is required and the real return pointer needs to be
 * fetched, then it looks at the task's curr_ret_stack offset; if it
 * is greater than zero (reserved, or right before being popped), it masks
 * the value with FGRAPH_FRAME_OFFSET_MASK to get the offset of the
 * ftrace_ret_stack structure stored on the shadow stack.
 */

/*
 * The following is for the top word on the stack:
 *
 *   FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame
 *   FGRAPH_TYPE (10-11) holds the type of word this is.
 *     (RESERVED, BITMAP or DATA)
 */
#define FGRAPH_FRAME_OFFSET_BITS	10
#define FGRAPH_FRAME_OFFSET_MASK	GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)

#define FGRAPH_TYPE_BITS	2
#define FGRAPH_TYPE_MASK	GENMASK(FGRAPH_TYPE_BITS - 1, 0)
#define FGRAPH_TYPE_SHIFT	FGRAPH_FRAME_OFFSET_BITS

enum {
	FGRAPH_TYPE_RESERVED	= 0,
	FGRAPH_TYPE_BITMAP	= 1,
	FGRAPH_TYPE_DATA	= 2,
};

/*
 * For BITMAP type:
 * FGRAPH_INDEX (12-27) bits hold the bitmap of gops indexes that want their
 * return callback called
 */
#define FGRAPH_INDEX_BITS	16
#define FGRAPH_INDEX_MASK	GENMASK(FGRAPH_INDEX_BITS - 1, 0)
#define FGRAPH_INDEX_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)

/*
 * For DATA type:
 * FGRAPH_DATA (12-16) bits hold the size of data (in words)
 * FGRAPH_INDEX (17-20) bits hold the index for which gops->idx the data is for
 *
 * Note:
 *  data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words.
 */
#define FGRAPH_DATA_BITS	5
#define FGRAPH_DATA_MASK	GENMASK(FGRAPH_DATA_BITS - 1, 0)
#define FGRAPH_DATA_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
#define FGRAPH_MAX_DATA_SIZE (sizeof(long) * (1 << FGRAPH_DATA_BITS))

#define FGRAPH_DATA_INDEX_BITS	4
#define FGRAPH_DATA_INDEX_MASK	GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
#define FGRAPH_DATA_INDEX_SHIFT	(FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS)

#define FGRAPH_MAX_INDEX	\
	((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX)

#define FGRAPH_ARRAY_SIZE	FGRAPH_INDEX_BITS

/*
 * SHADOW_STACK_SIZE:	The size in bytes of the entire shadow stack
 * SHADOW_STACK_OFFSET:	The size in long words of the shadow stack
 * SHADOW_STACK_MAX_OFFSET: The max offset of the stack for a new frame to be added
 */
#define SHADOW_STACK_SIZE	(PAGE_SIZE)
#define SHADOW_STACK_OFFSET	(SHADOW_STACK_SIZE / sizeof(long))
/* Leave a buffer at the end */
#define SHADOW_STACK_MAX_OFFSET				\
	(SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))
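
/*
 * Illustrative sizing (an example, not enforced by this file): on a 64-bit
 * kernel with 4K pages, SHADOW_STACK_SIZE is 4096 bytes, so
 * SHADOW_STACK_OFFSET is 4096 / 8 = 512 words. With the FGRAPH_ARRAY_SIZE
 * (16) task-variable words reserved at the top, SHADOW_STACK_MAX_OFFSET is
 * 512 - (FGRAPH_FRAME_OFFSET + 1 + 16), which keeps enough room below the
 * task-variable area for one more fgraph frame plus its reserved word.
 */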

/* RET_STACK(): Return the frame from a given @offset from task @t */
#define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))

/*
 * Each fgraph_ops has a reserved unsigned long at the end (top) of the
 * ret_stack to store task specific state.
 */
#define SHADOW_STACK_TASK_VARS(ret_stack) \
	((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))

DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
int ftrace_graph_active;

static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
static unsigned long fgraph_array_bitmask;

/* LRU index table for fgraph_array */
static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
static int fgraph_lru_next;
static int fgraph_lru_last;

/* Initialize fgraph_lru_table with unused index */
static void fgraph_lru_init(void)
{
	int i;

	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
		fgraph_lru_table[i] = i;
}

/* Release the used index to the LRU table */
static int fgraph_lru_release_index(int idx)
{
	if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE ||
	    WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1))
		return -1;

	fgraph_lru_table[fgraph_lru_last] = idx;
	fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE;

	clear_bit(idx, &fgraph_array_bitmask);
	return 0;
}

/* Allocate a new index from LRU table */
static int fgraph_lru_alloc_index(void)
{
	int idx = fgraph_lru_table[fgraph_lru_next];

	/* No id is available */
	if (idx == -1)
		return -1;

	fgraph_lru_table[fgraph_lru_next] = -1;
	fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE;

	set_bit(idx, &fgraph_array_bitmask);
	return idx;
}
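
/*
 * Example (illustrative only): a typical register/unregister cycle of the
 * LRU index table looks like:
 *
 *	int idx = fgraph_lru_alloc_index();	// returns 0 right after fgraph_lru_init()
 *	if (idx < 0)
 *		return -ENOSPC;			// all FGRAPH_ARRAY_SIZE slots in use
 *	// ... use fgraph_array[idx] ...
 *	fgraph_lru_release_index(idx);		// idx goes to the tail of the queue
 *
 * Released indexes are handed out again in FIFO order, so a recently freed
 * slot is reused as late as possible.
 */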

/* Get the offset to the fgraph frame from a ret_stack value */
static inline int __get_offset(unsigned long val)
{
	return val & FGRAPH_FRAME_OFFSET_MASK;
}

/* Get the type of word from a ret_stack value */
static inline int __get_type(unsigned long val)
{
	return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;
}

/* Get the data_index for a DATA type ret_stack word */
static inline int __get_data_index(unsigned long val)
{
	return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;
}

/* Get the data_size for a DATA type ret_stack word */
static inline int __get_data_size(unsigned long val)
{
	return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
}

/* Get the word from the ret_stack at @offset */
static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset)
{
	return t->ret_stack[offset];
}

/* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
static inline int get_frame_offset(struct task_struct *t, int offset)
{
	return __get_offset(t->ret_stack[offset]);
}

/* For BITMAP type: get the bitmask from the @offset at ret_stack */
static inline unsigned long
get_bitmap_bits(struct task_struct *t, int offset)
{
	return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
}

/* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */
static inline void
set_bitmap(struct task_struct *t, int offset, unsigned long bitmap)
{
	t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
		(FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
}

/* For DATA type: get the data saved under the ret_stack word at @offset */
static inline void *get_data_type_data(struct task_struct *t, int offset)
{
	unsigned long val = t->ret_stack[offset];

	if (__get_type(val) != FGRAPH_TYPE_DATA)
		return NULL;
	offset -= __get_data_size(val);
	return (void *)&t->ret_stack[offset];
}

/* Create the ret_stack word for a DATA type */
static inline unsigned long make_data_type_val(int idx, int size, int offset)
{
	return (idx << FGRAPH_DATA_INDEX_SHIFT) |
		((size - 1) << FGRAPH_DATA_SHIFT) |
		(FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset;
}
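
/*
 * Worked example (illustrative only): make_data_type_val(3, 2, 4) builds a
 * word for fgraph_ops index 3 storing 2 data words, located 4 words above
 * the previous ftrace_ret_stack:
 *
 *	(3 << FGRAPH_DATA_INDEX_SHIFT) | ((2 - 1) << FGRAPH_DATA_SHIFT) |
 *	(FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | 4
 *
 * Reading it back: __get_type() returns FGRAPH_TYPE_DATA, __get_data_index()
 * returns 3, __get_data_size() returns 2 and __get_offset() returns 4.
 */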

/* ftrace_graph_entry set to this to tell some archs to run function graph */
static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops)
{
	return 0;
}

/* ftrace_graph_return set to this to tell some archs to run function graph */
static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops)
{
}

static void ret_stack_set_task_var(struct task_struct *t, int idx, long val)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);

	gvals[idx] = val;
}

static unsigned long *
ret_stack_get_task_var(struct task_struct *t, int idx)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);

	return &gvals[idx];
}

static void ret_stack_init_task_vars(unsigned long *ret_stack)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);

	memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE);
}

/**
 * fgraph_reserve_data - Reserve storage on the task's ret_stack
 * @idx:	The index of fgraph_array
 * @size_bytes:	The size in bytes to reserve
 *
 * Reserves space of up to FGRAPH_MAX_DATA_SIZE bytes on the
 * task's ret_stack shadow stack, for a given fgraph_ops during
 * the entryfunc() call. If entryfunc() returns zero, the storage
 * is discarded. An entryfunc() can only call this once per iteration.
 * The fgraph_ops retfunc() can retrieve this stored data with
 * fgraph_retrieve_data().
 *
 * Returns: On success, a pointer to the data on the stack.
 *   Otherwise, NULL if there's not enough space left on the
 *   ret_stack for the data, or if fgraph_reserve_data() was called
 *   more than once for a single entryfunc() call.
 */
void *fgraph_reserve_data(int idx, int size_bytes)
{
	unsigned long val;
	void *data;
	int curr_ret_stack = current->curr_ret_stack;
	int data_size;

	if (size_bytes > FGRAPH_MAX_DATA_SIZE)
		return NULL;

	/* Convert the data size to number of longs. */
	data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3);

	val = get_fgraph_entry(current, curr_ret_stack - 1);
	data = &current->ret_stack[curr_ret_stack];

	curr_ret_stack += data_size + 1;
	if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET))
		return NULL;

	val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);

	/* Set the last word to be reserved */
	current->ret_stack[curr_ret_stack - 1] = val;

	/* Make sure interrupts see this */
	barrier();
	current->curr_ret_stack = curr_ret_stack;
	/* Again sync with interrupts, and reset reserve */
	current->ret_stack[curr_ret_stack - 1] = val;

	return data;
}
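
/*
 * Example usage (a sketch, not part of this file): a hypothetical fgraph_ops
 * entryfunc() that stashes the entry timestamp for its retfunc() to read
 * back later with fgraph_retrieve_data():
 *
 *	static int my_entry(struct ftrace_graph_ent *trace,
 *			    struct fgraph_ops *gops)
 *	{
 *		u64 *calltime;
 *
 *		calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
 *		if (!calltime)
 *			return 0;	// no room left; skip tracing this call
 *
 *		*calltime = trace_clock_local();
 *		return 1;
 *	}
 */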

/**
 * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
 * @idx:	the index of fgraph_array (fgraph_ops::idx)
 * @size_bytes: pointer to retrieved data size.
 *
 * This is to be called by a fgraph_ops retfunc(), to retrieve data that
 * was stored by the fgraph_ops entryfunc() on the function entry.
 * That is, this will retrieve the data that was reserved on the
 * entry of the function that corresponds to the exit of the function
 * that the fgraph_ops retfunc() is called on.
 *
 * Returns: The stored data from fgraph_reserve_data() called by the
 *   matching entryfunc() for the retfunc() this is called from.
 *   Or NULL if there was nothing stored.
 */
void *fgraph_retrieve_data(int idx, int *size_bytes)
{
	int offset = current->curr_ret_stack - 1;
	unsigned long val;

	val = get_fgraph_entry(current, offset);
	while (__get_type(val) == FGRAPH_TYPE_DATA) {
		if (__get_data_index(val) == idx)
			goto found;
		offset -= __get_data_size(val) + 1;
		val = get_fgraph_entry(current, offset);
	}
	return NULL;
found:
	if (size_bytes)
		*size_bytes = __get_data_size(val) * sizeof(long);
	return get_data_type_data(current, offset);
}
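
/*
 * Example usage (a sketch): the matching retfunc() for the hypothetical
 * entryfunc() shown above fgraph_reserve_data(), reading the timestamp back:
 *
 *	static void my_return(struct ftrace_graph_ret *trace,
 *			      struct fgraph_ops *gops)
 *	{
 *		u64 *calltime;
 *		int size;
 *
 *		calltime = fgraph_retrieve_data(gops->idx, &size);
 *		if (!calltime)
 *			return;		// entryfunc() did not reserve data
 *
 *		// trace->rettime - *calltime is the duration of the call;
 *		// what to do with it is up to the (hypothetical) caller.
 *	}
 */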

/**
 * fgraph_get_task_var - retrieve a task specific state variable
 * @gops: The ftrace_ops that owns the task specific variable
 *
 * Every registered fgraph_ops has a task state variable
 * reserved on the task's ret_stack. This function returns the
 * address to that variable.
 *
 * Returns the address to the fgraph_ops @gops task specific
 * unsigned long variable.
 */
unsigned long *fgraph_get_task_var(struct fgraph_ops *gops)
{
	return ret_stack_get_task_var(current, gops->idx);
}
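
/*
 * Example usage (a sketch): a fgraph_ops can use its per-task word to keep
 * state that survives across nested calls on the same task, e.g. tracking
 * how deeply this fgraph_ops is currently nested:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace,
 *			    struct fgraph_ops *gops)
 *	{
 *		unsigned long *depth = fgraph_get_task_var(gops);
 *
 *		(*depth)++;		// the matching retfunc() would decrement it
 *		return 1;
 *	}
 */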

/*
 * @offset: The offset into @t->ret_stack to find the ret_stack entry
 * @frame_offset: Where to place the offset into @t->ret_stack of that entry
 *
 * Returns a pointer to the previous ret_stack below @offset or NULL
 * when it reaches the bottom of the stack.
 *
 * Calling this with:
 *
 *   offset = task->curr_ret_stack;
 *   do {
 *	ret_stack = get_ret_stack(task, offset, &offset);
 *   } while (ret_stack);
 *
 * Will iterate through all the ret_stack entries from curr_ret_stack
 * down to the first one.
 */
static inline struct ftrace_ret_stack *
get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
{
	int offs;

	BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long));

	if (unlikely(offset <= 0))
		return NULL;

	offs = get_frame_offset(t, --offset);
	if (WARN_ON_ONCE(offs <= 0 || offs > offset))
		return NULL;

	offset -= offs;

	*frame_offset = offset;
	return RET_STACK(t, offset);
}

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * archs can override this function if they must do something
 * to enable hook for graph tracer.
 */
int __weak ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * to disable hook for graph tracer.
 */
int __weak ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
			    struct fgraph_ops *gops)
{
	return 0;
}

static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
				  struct fgraph_ops *gops)
{
}

static struct fgraph_ops fgraph_stub = {
	.entryfunc = ftrace_graph_entry_stub,
	.retfunc = ftrace_graph_ret_stub,
};

static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	static_branch_enable(&kill_ftrace_graph);
}

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp,
			 int fgraph_idx)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long long calltime;
	unsigned long val;
	int offset;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long));

	/* Set val to "reserved" with the delta to the new fgraph frame */
	val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/*
	 * Check if there's room on the shadow stack to fit a fgraph frame
	 * and a bitmap word.
	 */
	if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	offset = READ_ONCE(current->curr_ret_stack);
	ret_stack = RET_STACK(current, offset);
	offset += FGRAPH_FRAME_OFFSET;

	/* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */
	current->ret_stack[offset] = val;
	ret_stack->ret = ret;
	/*
	 * The unwinders expect curr_ret_stack to point to either zero
	 * or an offset where to find the next ret_stack. Even though the
	 * ret stack might be bogus, we want to write the ret and the
	 * offset to find the ret_stack before we increment the stack pointer.
	 * If an interrupt comes in now before we increment the curr_ret_stack
	 * it may blow away what we wrote. But that's fine, because the
	 * offset will still be correct (even though the 'ret' won't be).
	 * What we worry about is the offset being correct after we increment
	 * the curr_ret_stack and before we update that offset, as if an
	 * interrupt comes in and does an unwind stack dump, it will need
	 * at least a correct offset!
	 */
	barrier();
	WRITE_ONCE(current->curr_ret_stack, offset + 1);
	/*
	 * This next barrier is to ensure that an interrupt coming in
	 * will not corrupt what we are about to write.
	 */
	barrier();

	/* Still keep it reserved even if an interrupt came in */
	current->ret_stack[offset] = val;

	ret_stack->ret = ret;
	ret_stack->func = func;
	ret_stack->calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	ret_stack->fp = frame_pointer;
#endif
	ret_stack->retp = retp;
	return offset;
}

/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif

/* If the caller does not use ftrace, call this function. */
int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;
	unsigned long bitmap = 0;
	int offset;
	int i;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
	if (offset < 0)
		goto out;

#ifdef CONFIG_HAVE_STATIC_CALL
	if (static_branch_likely(&fgraph_do_direct)) {
		int save_curr_ret_stack = current->curr_ret_stack;

		if (static_call(fgraph_func)(&trace, fgraph_direct_gops))
			bitmap |= BIT(fgraph_direct_gops->idx);
		else
			/* Clear out any saved storage */
			current->curr_ret_stack = save_curr_ret_stack;
	} else
#endif
	{
		for_each_set_bit(i, &fgraph_array_bitmask,
				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
			int save_curr_ret_stack;

			if (gops == &fgraph_stub)
				continue;

			save_curr_ret_stack = current->curr_ret_stack;
			if (ftrace_ops_test(&gops->ops, func, NULL) &&
			    gops->entryfunc(&trace, gops))
				bitmap |= BIT(i);
			else
				/* Clear out any saved storage */
				current->curr_ret_stack = save_curr_ret_stack;
		}
	}

	if (!bitmap)
		goto out_ret;

	/*
	 * Since this function uses fgraph_idx = 0 as a tail-call checking
	 * flag, set that bit always.
	 */
	set_bitmap(current, offset, bitmap | BIT(0));

	return 0;
 out_ret:
	current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}

/* Retrieve a function return address from the trace stack on thread info. */
static struct ftrace_ret_stack *
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer, int *offset)
{
	struct ftrace_ret_stack *ret_stack;

	ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);

	if (unlikely(!ret_stack)) {
		ftrace_graph_stop();
		WARN(1, "Bad function graph ret_stack pointer: %d",
		     current->curr_ret_stack);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return NULL;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(ret_stack->fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     ret_stack->fp,
		     frame_pointer,
		     (void *)ret_stack->func,
		     ret_stack->ret);
		*ret = (unsigned long)panic;
		return NULL;
	}
#endif

	*offset += FGRAPH_FRAME_OFFSET;
	*ret = ret_stack->ret;
	trace->func = ret_stack->func;
	trace->calltime = ret_stack->calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();

	return ret_stack;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

/* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
struct fgraph_ret_regs;

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
						unsigned long frame_pointer)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_graph_ret trace;
	unsigned long bitmap;
	unsigned long ret;
	int offset;
	int i;

	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);

	if (unlikely(!ret_stack)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		return (unsigned long)panic;
	}

	trace.rettime = trace_clock_local();
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	trace.retval = fgraph_ret_regs_return_value(ret_regs);
#endif

	bitmap = get_bitmap_bits(current, offset);

#ifdef CONFIG_HAVE_STATIC_CALL
	if (static_branch_likely(&fgraph_do_direct)) {
		if (test_bit(fgraph_direct_gops->idx, &bitmap))
			static_call(fgraph_retfunc)(&trace, fgraph_direct_gops);
	} else
#endif
	{
		for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
			struct fgraph_ops *gops = fgraph_array[i];

			if (gops == &fgraph_stub)
				continue;

			gops->retfunc(&trace, gops);
		}
	}

	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure, we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET;

	current->curr_ret_depth--;
	return ret;
}

/*
 * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
 * leave only ftrace_return_to_handler(ret_regs).
 */
#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
{
	return __ftrace_return_to_handler(ret_regs,
				fgraph_ret_regs_frame_pointer(ret_regs));
}
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	return __ftrace_return_to_handler(NULL, frame_pointer);
}
#endif

/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from.
 * @idx: Index down the shadow stack
 *
 * Return the ret_struct on the shadow stack of the @task at the
 * call graph at @idx starting with zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for the depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	struct ftrace_ret_stack *ret_stack = NULL;
	int offset = task->curr_ret_stack;

	if (offset < 0)
		return NULL;

	do {
		ret_stack = get_ret_stack(task, offset, &offset);
	} while (ret_stack && --idx >= 0);

	return ret_stack;
}
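
/*
 * Example usage (a sketch): architecture code that needs the original return
 * address of the function currently being traced can read the newest frame:
 *
 *	struct ftrace_ret_stack *frame;
 *
 *	frame = ftrace_graph_get_ret_stack(current, 0);
 *	if (frame)
 *		pr_info("traced %ps will return to %lx\n",
 *			(void *)frame->func, frame->ret);
 */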

/**
 * ftrace_graph_ret_addr - return the original value of the return address
 * @task: The task the unwinder is being executed on
 * @idx: An initialized pointer to the next stack index to use
 * @ret: The current return address (likely pointing to return_handler)
 * @retp: The address on the stack of the current return location
 *
 * This function can be called by stack unwinding code to convert a found
 * stack return address (@ret) to its original value, in case the function
 * graph tracer has modified it to be 'return_to_handler'. If the address
 * hasn't been modified, the unchanged value of @ret is returned.
 *
 * @idx holds the last index used to know where to start from. It should be
 * initialized to zero for the first iteration as that will mean to start
 * at the top of the shadow stack. If the location is found, this pointer
 * will be assigned that location so that if called again, it will continue
 * where it left off.
 *
 * @retp is a pointer to the return address on the stack.
 */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
	int i = task->curr_ret_stack;

	if (ret != return_handler)
		return ret;

	if (!idx)
		return ret;

	i = *idx ? : task->curr_ret_stack;
	while (i > 0) {
		ret_stack = get_ret_stack(task, i, &i);
		if (!ret_stack)
			break;
		/*
		 * For a tail-call, there would be 2 or more ftrace_ret_stacks on
		 * the ret_stack, which record "return_to_handler" as the return
		 * address except for the last one.
		 * But on the real stack, there should be 1 entry because the
		 * tail-call reuses the return address on the stack and jumps to
		 * the next function.
		 * Thus we keep looking for the real return address.
		 */
		if (ret_stack->retp == retp &&
		    ret_stack->ret != return_handler) {
			*idx = i;
			return ret_stack->ret;
		}
	}

	return ret;
}
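
/*
 * Example usage (a sketch): a stack unwinder converts every saved return
 * address it finds so that 'return_to_handler' never shows up in the
 * resulting backtrace. With 'ret' the saved return address of the current
 * frame and 'retp' its location on the stack:
 *
 *	int graph_idx = 0;
 *	unsigned long addr;
 *
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, ret, retp);
 *
 * 'addr' is the original return address, or 'ret' unchanged if the function
 * graph tracer had not modified that location.
 */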

static struct ftrace_ops graph_ops = {
	.func			= ftrace_graph_func,
	.flags			= FTRACE_OPS_GRAPH_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
};

void fgraph_init_ops(struct ftrace_ops *dst_ops,
		     struct ftrace_ops *src_ops)
{
	dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (src_ops) {
		dst_ops->func_hash = &src_ops->local_hash;
		mutex_init(&dst_ops->local_hash.regex_lock);
		INIT_LIST_HEAD(&dst_ops->subop_list);
		dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			ret_stack_init_task_vars(ret_stack_list[start]);
			t->curr_ret_stack = 0;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the 0 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	}

unlock:
	rcu_read_unlock();
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned int prev_state)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long long timestamp;
	int offset;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (offset = next->curr_ret_stack; offset > 0; ) {
		ret_stack = get_ret_stack(next, offset, &offset);
		if (ret_stack)
			ret_stack->calltime += timestamp;
	}
}

static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, unsigned long *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	ret_stack_init_task_vars(ret_stack);
	t->ftrace_timestamp = 0;
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;

	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		unsigned long *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		unsigned long *ret_stack;

		ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	unsigned long *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

#ifdef CONFIG_DYNAMIC_FTRACE
static int fgraph_pid_func(struct ftrace_graph_ent *trace,
			   struct fgraph_ops *gops)
{
	struct trace_array *tr = gops->ops.private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return 0;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return 0;
	}

	return gops->saved_func(trace, gops);
}

void fgraph_update_pid_func(void)
{
	struct fgraph_ops *gops;
	struct ftrace_ops *op;

	if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
		return;

	list_for_each_entry(op, &graph_ops.subop_list, list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			gops = container_of(op, struct fgraph_ops, ops);
			gops->entryfunc = ftrace_pids_enabled(op) ?
				fgraph_pid_func : gops->saved_func;
			if (ftrace_graph_active == 1)
				static_call_update(fgraph_func, gops->entryfunc);
		}
	}
}
#endif

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	unsigned long **ret_stack_list;
	int ret;

	ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
				 sizeof(*ret_stack_list), GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

static void init_task_vars(int idx)
{
	struct task_struct *g, *t;
	int cpu;

	for_each_online_cpu(cpu) {
		if (idle_task(cpu)->ret_stack)
			ret_stack_set_task_var(idle_task(cpu), idx, 0);
	}

	read_lock(&tasklist_lock);
	for_each_process_thread(g, t) {
		if (t->ret_stack)
			ret_stack_set_task_var(t, idx, 0);
	}
	read_unlock(&tasklist_lock);
}

static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *gops)
{
	trace_func_graph_ent_t func = NULL;
	trace_func_graph_ret_t retfunc = NULL;
	int i;

	if (gops) {
		func = gops->entryfunc;
		retfunc = gops->retfunc;
		fgraph_direct_gops = gops;
	} else {
		for_each_set_bit(i, &fgraph_array_bitmask,
				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
			func = fgraph_array[i]->entryfunc;
			retfunc = fgraph_array[i]->retfunc;
			fgraph_direct_gops = fgraph_array[i];
		}
	}
	if (WARN_ON_ONCE(!func))
		return;

	static_call_update(fgraph_func, func);
	static_call_update(fgraph_retfunc, retfunc);
	if (enable_branch)
		static_branch_enable(&fgraph_do_direct);
}

static void ftrace_graph_disable_direct(bool disable_branch)
{
	if (disable_branch)
		static_branch_disable(&fgraph_do_direct);
	static_call_update(fgraph_func, ftrace_graph_entry_stub);
	static_call_update(fgraph_retfunc, ftrace_graph_ret_stub);
	fgraph_direct_gops = &fgraph_stub;
}

/* The cpu_boot init_task->ret_stack will never be freed */
static int fgraph_cpu_init(unsigned int cpu)
{
	if (!idle_task(cpu)->ret_stack)
		ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	return 0;
}

int register_ftrace_graph(struct fgraph_ops *gops)
{
	static bool fgraph_initialized;
	int command = 0;
	int ret = 0;
	int i = -1;

	mutex_lock(&ftrace_lock);

	if (!fgraph_initialized) {
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph_idle_init",
					fgraph_cpu_init, NULL);
		if (ret < 0) {
			pr_warn("fgraph: Error initializing cpu hotplug support\n");
			goto out;
		}
		fgraph_initialized = true;
		ret = 0;
	}

	if (!fgraph_array[0]) {
		/* The array must always have real data on it */
		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
			fgraph_array[i] = &fgraph_stub;
		fgraph_lru_init();
	}

	i = fgraph_lru_alloc_index();
	if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub)) {
		ret = -ENOSPC;
		goto out;
	}
	gops->idx = i;

	ftrace_graph_active++;

	if (ftrace_graph_active == 2)
		ftrace_graph_disable_direct(true);

	if (ftrace_graph_active == 1) {
		ftrace_graph_enable_direct(false, gops);
		register_pm_notifier(&ftrace_suspend_notifier);
		ret = start_graph_tracing();
		if (ret)
			goto error;
		/*
		 * Some archs just test to see if these are not
		 * the default function
		 */
		ftrace_graph_return = return_run;
		ftrace_graph_entry = entry_run;
		command = FTRACE_START_FUNC_RET;
	} else {
		init_task_vars(gops->idx);
	}
	/* Always save the function, and reset at unregistering */
	gops->saved_func = gops->entryfunc;

	ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
	if (!ret)
		fgraph_array[i] = gops;

error:
	if (ret) {
		ftrace_graph_active--;
		gops->saved_func = NULL;
		fgraph_lru_release_index(i);
	}
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	int command = 0;

	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE ||
		     fgraph_array[gops->idx] != gops))
		goto out;

	if (fgraph_lru_release_index(gops->idx) < 0)
		goto out;

	fgraph_array[gops->idx] = &fgraph_stub;

	ftrace_graph_active--;

	if (!ftrace_graph_active)
		command = FTRACE_STOP_FUNC_RET;

	ftrace_shutdown_subops(&graph_ops, &gops->ops, command);

	if (ftrace_graph_active == 1)
		ftrace_graph_enable_direct(true, NULL);
	else if (!ftrace_graph_active)
		ftrace_graph_disable_direct(false);

	if (!ftrace_graph_active) {
		ftrace_graph_return = ftrace_stub_graph;
		ftrace_graph_entry = ftrace_graph_entry_stub;
		unregister_pm_notifier(&ftrace_suspend_notifier);
		unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
	}
 out:
	gops->saved_func = NULL;
	mutex_unlock(&ftrace_lock);
}
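
/*
 * Example usage (a sketch, assuming a hypothetical caller): registering a
 * minimal fgraph_ops that pairs the two callbacks sketched earlier in the
 * fgraph_reserve_data()/fgraph_retrieve_data() examples:
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return register_ftrace_graph(&my_gops);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_ftrace_graph(&my_gops);
 *	}
 *
 * register_ftrace_graph() assigns my_gops.idx from the LRU table and starts
 * tracing; unregister_ftrace_graph() releases the index and stops tracing
 * once no fgraph_ops remain registered.
 */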