lib/trace.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2012 The Chromium OS Authors.
 */
#include <mapmem.h>
#include <time.h>
#include <trace.h>
#include <linux/errno.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/sections.h>

DECLARE_GLOBAL_DATA_PTR;

static char trace_enabled __section(".data");
static char trace_inited __section(".data");
/* The header block at the start of the trace memory area */
struct trace_hdr {
	int func_count;		/* Total number of function call sites */
	u64 call_count;		/* Total number of tracked function calls */
	u64 untracked_count;	/* Total number of untracked function calls */
	int funcs_used;		/* Total number of functions used */

	/*
	 * Call count for each function. This is indexed by the word offset
	 * of the function from gd->relocaddr
	 */
	uintptr_t *call_accum;

	/* Function trace list */
	struct trace_call *ftrace;	/* The function call records */
	ulong ftrace_size;		/* Num. of ftrace records we have space for */
	ulong ftrace_count;		/* Num. of ftrace records written */
	ulong ftrace_too_deep_count;	/* Functions that were too deep */

	int depth;		/* Depth of function calls */
	int depth_limit;	/* Depth limit to trace to */
	int max_depth;		/* Maximum depth seen so far */
	int min_depth;		/* Minimum depth seen so far */
	bool trace_locked;	/* Used to detect recursive tracing */
};
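/*
 * For orientation, the layout that trace_init_() and trace_early_init()
 * below create inside the trace buffer is roughly:
 *
 *	struct trace_hdr	the header above
 *	uintptr_t [func_count]	call_accum, one counter per call site
 *	struct trace_call []	ftrace, timed call records filling the
 *				remaining space
 *
 * This is a sketch derived from the pointer arithmetic in those functions;
 * the actual sizes depend on the configured buffer size and gd->mon_len.
 */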
/* Pointer to start of trace buffer */
static struct trace_hdr *hdr __section(".data");
static inline uintptr_t __attribute__((no_instrument_function))
		func_ptr_to_num(void *func_ptr)
{
	uintptr_t offset = (uintptr_t)func_ptr;

#ifdef CONFIG_SANDBOX
	offset -= (uintptr_t)_init;
#else
	if (gd->flags & GD_FLG_RELOC)
		offset -= gd->relocaddr;
	else
		offset -= CONFIG_TEXT_BASE;
#endif
	return offset / FUNC_SITE_SIZE;
}
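/*
 * Illustrative example (FUNC_SITE_SIZE comes from trace.h; treating it as
 * 4 bytes here is an assumption): a function whose entry point lies 0x120
 * bytes above the text base maps to call-site index 0x120 / 4 = 0x48.
 * That index selects the slot in hdr->call_accum[] that
 * __cyg_profile_func_enter() below increments.
 */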
#if defined(CONFIG_EFI_LOADER) && (defined(CONFIG_ARM) || defined(CONFIG_RISCV))

/**
 * trace_gd - the value of the gd register
 */
static volatile gd_t *trace_gd;

/**
 * trace_save_gd() - save the value of the gd register
 */
static void notrace trace_save_gd(void)
{
	trace_gd = gd;
}

/**
 * trace_swap_gd() - swap between U-Boot and application gd register value
 *
 * A UEFI application may change the value of the register that gd lives in.
 * But some of our functions, like get_ticks(), access this register, so we
 * have to set the gd register to the U-Boot value when entering a trace
 * point and set it back to the application value when exiting the trace point.
 */
static void notrace trace_swap_gd(void)
{
	volatile gd_t *temp_gd = trace_gd;

	trace_gd = gd;
	set_gd(temp_gd);
}

#else

static void notrace trace_save_gd(void)
{
}

static void notrace trace_swap_gd(void)
{
}

#endif
/* Append one timed call record, if there is still room in the buffer */
static void notrace add_ftrace(void *func_ptr, void *caller, ulong flags)
{
	if (hdr->depth > hdr->depth_limit) {
		hdr->ftrace_too_deep_count++;
		return;
	}
	if (hdr->ftrace_count < hdr->ftrace_size) {
		struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];

		rec->func = func_ptr_to_num(func_ptr);
		rec->caller = func_ptr_to_num(caller);
		rec->flags = flags | (timer_get_us() & FUNCF_TIMESTAMP_MASK);
	}
	hdr->ftrace_count++;
}
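/*
 * Note on the record encoding above: the microsecond timestamp from
 * timer_get_us() is packed into the low bits of ->flags using
 * FUNCF_TIMESTAMP_MASK, with the entry/exit flag kept in the remaining
 * bits.  A consumer could split a record roughly like this (sketch only;
 * the exact bit layout is defined by the FUNCF_* values in trace.h):
 *
 *	ulong timestamp = rec->flags & FUNCF_TIMESTAMP_MASK;
 *	ulong type = rec->flags & ~FUNCF_TIMESTAMP_MASK;
 */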
/**
 * __cyg_profile_func_enter() - record function entry
 *
 * We add to our tally for this function and add to the list of called
 * functions.
 *
 * @func_ptr: pointer to function being entered
 * @caller: pointer to function which called this function
 */
void notrace __cyg_profile_func_enter(void *func_ptr, void *caller)
{
	if (trace_enabled) {
		int func;

		if (hdr->trace_locked) {
			trace_enabled = 0;
			puts("trace: recursion detected, disabling\n");
			hdr->trace_locked = false;
			return;
		}

		hdr->trace_locked = true;
		trace_swap_gd();
		add_ftrace(func_ptr, caller, FUNCF_ENTRY);
		func = func_ptr_to_num(func_ptr);
		if (func < hdr->func_count) {
			hdr->call_accum[func]++;
			hdr->call_count++;
		} else {
			hdr->untracked_count++;
		}
		hdr->depth++;
		if (hdr->depth > hdr->max_depth)
			hdr->max_depth = hdr->depth;
		trace_swap_gd();
		hdr->trace_locked = false;
	}
}
/**
 * __cyg_profile_func_exit() - record function exit
 *
 * @func_ptr: pointer to function being exited
 * @caller: pointer to function which called this function
 */
void notrace __cyg_profile_func_exit(void *func_ptr, void *caller)
{
	if (trace_enabled) {
		trace_swap_gd();
		hdr->depth--;
		add_ftrace(func_ptr, caller, FUNCF_EXIT);
		if (hdr->depth < hdr->min_depth)
			hdr->min_depth = hdr->depth;
		trace_swap_gd();
	}
}
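/*
 * Neither of the two hooks above is called directly from U-Boot code.
 * When tracing is built in, the compiler instruments every function that
 * is not marked notrace/no_instrument_function, so that in effect:
 *
 *	void foo(void)
 *	{
 *		__cyg_profile_func_enter(foo, return_address);
 *		...original body of foo()...
 *		__cyg_profile_func_exit(foo, return_address);
 *	}
 *
 * This is the standard gcc -finstrument-functions mechanism; the exact
 * compiler flags are set up by the build system, not by this file.
 */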
/**
 * trace_list_functions() - produce a list of called functions
 *
 * The information is written into the supplied buffer - a header followed
 * by a list of function records.
 *
 * @buff: buffer to place list into
 * @buff_size: size of buffer
 * @needed: returns size of buffer needed, which may be
 * greater than buff_size if we ran out of space.
 * Return: 0 if ok, -ENOSPC if space was exhausted
 */
int trace_list_functions(void *buff, size_t buff_size, size_t *needed)
{
	struct trace_output_hdr *output_hdr = NULL;
	void *end, *ptr = buff;
	size_t func;
	size_t upto;

	end = buff ? buff + buff_size : NULL;

	/* Place some header information */
	if (ptr + sizeof(struct trace_output_hdr) < end)
		output_hdr = ptr;
	ptr += sizeof(struct trace_output_hdr);

	/* Add information about each function */
	for (func = upto = 0; func < hdr->func_count; func++) {
		size_t calls = hdr->call_accum[func];

		if (!calls)
			continue;

		if (ptr + sizeof(struct trace_output_func) < end) {
			struct trace_output_func *stats = ptr;

			stats->offset = func * FUNC_SITE_SIZE;
			stats->call_count = calls;
			upto++;
		}
		ptr += sizeof(struct trace_output_func);
	}

	/* Update the header */
	if (output_hdr) {
		output_hdr->rec_count = upto;
		output_hdr->type = TRACE_CHUNK_FUNCS;
	}

	/* Work out how much of the buffer we used */
	*needed = ptr - buff;
	if (ptr > end)
		return -ENOSPC;

	return 0;
}
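/*
 * Hypothetical usage sketch: because *needed is filled in even when the
 * buffer is too small, a caller can size a second attempt from a failed
 * first call (buf and size are illustrative names):
 *
 *	size_t needed;
 *
 *	if (trace_list_functions(buf, size, &needed) == -ENOSPC)
 *		printf("trace: need %zx bytes, only %zx available\n",
 *		       needed, size);
 */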
/**
 * trace_list_calls() - produce a list of function calls
 *
 * The information is written into the supplied buffer - a header followed
 * by a list of function-call records.
 *
 * @buff: buffer to place list into
 * @buff_size: size of buffer
 * @needed: returns size of buffer needed, which may be
 * greater than buff_size if we ran out of space.
 * Return: 0 if ok, -ENOSPC if space was exhausted
 */
int trace_list_calls(void *buff, size_t buff_size, size_t *needed)
{
	struct trace_output_hdr *output_hdr = NULL;
	void *end, *ptr = buff;
	size_t rec, upto;
	size_t count;

	end = buff ? buff + buff_size : NULL;

	/* Place some header information */
	if (ptr + sizeof(struct trace_output_hdr) < end)
		output_hdr = ptr;
	ptr += sizeof(struct trace_output_hdr);

	/* Add information about each call */
	count = hdr->ftrace_count;
	if (count > hdr->ftrace_size)
		count = hdr->ftrace_size;
	for (rec = upto = 0; rec < count; rec++) {
		if (ptr + sizeof(struct trace_call) < end) {
			struct trace_call *call = &hdr->ftrace[rec];
			struct trace_call *out = ptr;

			out->func = call->func * FUNC_SITE_SIZE;
			out->caller = call->caller * FUNC_SITE_SIZE;
			out->flags = call->flags;
			upto++;
		}
		ptr += sizeof(struct trace_call);
	}

	/* Update the header */
	if (output_hdr) {
		memset(output_hdr, '\0', sizeof(*output_hdr));
		output_hdr->rec_count = upto;
		output_hdr->type = TRACE_CHUNK_CALLS;
		output_hdr->version = TRACE_VERSION;
		output_hdr->text_base = CONFIG_TEXT_BASE;
	}

	/* Work out how much of the buffer we used */
	*needed = ptr - buff;
	if (ptr > end)
		return -ENOSPC;

	return 0;
}
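/*
 * The records written above hold byte offsets (the call-site index scaled
 * back up by FUNC_SITE_SIZE) rather than absolute addresses.  A
 * post-processing tool can reconstruct an address from the text_base
 * stored in the chunk header, roughly (sketch, assuming no further
 * relocation adjustment is needed):
 *
 *	addr = output_hdr->text_base + out->func;
 *
 * and then look the address up in the U-Boot symbol table.
 */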
/**
 * trace_print_stats() - print basic information about tracing
 */
void trace_print_stats(void)
{
	ulong count;

#ifndef FTRACE
	puts("Warning: make U-Boot with FTRACE to enable function instrumenting.\n");
	puts("You will likely get zeroed data here\n");
#endif
	if (!trace_inited) {
		printf("Trace is disabled\n");
		return;
	}
	print_grouped_ull(hdr->func_count, 10);
	puts(" function sites\n");
	print_grouped_ull(hdr->call_count, 10);
	puts(" function calls\n");
	print_grouped_ull(hdr->untracked_count, 10);
	puts(" untracked function calls\n");
	count = min(hdr->ftrace_count, hdr->ftrace_size);
	print_grouped_ull(count, 10);
	puts(" traced function calls");
	if (hdr->ftrace_count > hdr->ftrace_size) {
		printf(" (%lu dropped due to overflow)",
		       hdr->ftrace_count - hdr->ftrace_size);
	}

	/* Add in minimum depth since the trace did not start at top level */
	printf("\n%15d maximum observed call depth\n",
	       hdr->max_depth - hdr->min_depth);
	printf("%15d call depth limit\n", hdr->depth_limit);
	print_grouped_ull(hdr->ftrace_too_deep_count, 10);
	puts(" calls not traced due to depth\n");
	print_grouped_ull(hdr->ftrace_size, 10);
	puts(" max function calls\n");

	printf("\ntrace buffer %lx call records %lx\n",
	       (ulong)map_to_sysmem(hdr), (ulong)map_to_sysmem(hdr->ftrace));
}
void notrace trace_set_enabled(int enabled)
{
	trace_enabled = enabled != 0;
}

static int get_func_count(void)
{
	/* Detect no support for mon_len since this means tracing cannot work */
	if (IS_ENABLED(CONFIG_SANDBOX) && !gd->mon_len) {
		puts("Tracing is not supported on this board\n");
		return -ENOTSUPP;
	}

	return gd->mon_len / FUNC_SITE_SIZE;
}
static int notrace trace_init_(void *buff, size_t buff_size, bool copy_early,
			       bool enable)
{
	int func_count = get_func_count();
	size_t needed;
	int was_disabled = !trace_enabled;

	if (func_count < 0)
		return func_count;
	trace_save_gd();

	if (copy_early) {
#ifdef CONFIG_TRACE_EARLY
		ulong used, count;
		char *end;

		/*
		 * Copy over the early trace data if we have it. Disable
		 * tracing while we are doing this.
		 */
		trace_enabled = 0;
		hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR,
				 CONFIG_TRACE_EARLY_SIZE);
		count = min(hdr->ftrace_count, hdr->ftrace_size);
		end = (char *)&hdr->ftrace[count];
		used = end - (char *)hdr;
		printf("trace: copying %08lx bytes of early data from %x to %08lx\n",
		       used, CONFIG_TRACE_EARLY_ADDR,
		       (ulong)map_to_sysmem(buff));
		printf("%lu traced function calls", count);
		if (hdr->ftrace_count > hdr->ftrace_size) {
			printf(" (%lu dropped due to overflow)",
			       hdr->ftrace_count - hdr->ftrace_size);
			hdr->ftrace_count = hdr->ftrace_size;
		}
		puts("\n");
		memcpy(buff, hdr, used);
#endif
	}
	hdr = (struct trace_hdr *)buff;
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size %zx bytes: at least %zx needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	if (was_disabled) {
		memset(hdr, '\0', needed);
		hdr->min_depth = INT_MAX;
	}
	hdr->func_count = func_count;
	hdr->call_accum = (uintptr_t *)(hdr + 1);

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)(buff + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	hdr->depth_limit = CONFIG_TRACE_CALL_DEPTH_LIMIT;

	printf("trace: initialized, %senabled\n", enable ? "" : "not ");
	trace_enabled = enable;
	trace_inited = 1;

	return 0;
}
/**
 * trace_init() - initialize the tracing system and enable it
 *
 * @buff: Pointer to trace buffer
 * @buff_size: Size of trace buffer
 * Return: 0 if ok
 */
int notrace trace_init(void *buff, size_t buff_size)
{
	/* If traces are enabled already, we may have early traces to copy */
	return trace_init_(buff, buff_size, trace_enabled, true);
}
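/*
 * Typical call sequence (a sketch; the exact call site depends on the board
 * code and on how the trace buffer was reserved):
 *
 *	ret = trace_init(gd->trace_buff, CONFIG_TRACE_BUFFER_SIZE);
 *	if (ret)
 *		return ret;
 *
 * gd->trace_buff and CONFIG_TRACE_BUFFER_SIZE are the same buffer and size
 * that trace_wipe() below passes to trace_init_().
 */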
/**
 * trace_wipe() - clear accumulated traced data
 *
 * May be called with tracing enabled or disabled.
 */
int notrace trace_wipe(void)
{
	bool was_enabled = trace_enabled;

	if (trace_enabled)
		trace_enabled = 0;

	return trace_init_(gd->trace_buff, CONFIG_TRACE_BUFFER_SIZE,
			   false, was_enabled);
}
#ifdef CONFIG_TRACE_EARLY
/**
 * trace_early_init() - initialize the tracing system for early tracing
 *
 * Return: 0 if ok, -ENOSPC if not enough memory is available
 */
int notrace trace_early_init(void)
{
	int func_count = get_func_count();
	size_t buff_size = CONFIG_TRACE_EARLY_SIZE;
	size_t needed;

	if (func_count < 0)
		return func_count;
	/* We can ignore additional calls to this function */
	if (trace_enabled)
		return 0;

	hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR, CONFIG_TRACE_EARLY_SIZE);
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size is %zx bytes, at least %zx needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	memset(hdr, '\0', needed);
	hdr->call_accum = (uintptr_t *)(hdr + 1);
	hdr->func_count = func_count;
	hdr->min_depth = INT_MAX;

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)((char *)hdr + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	hdr->depth_limit = CONFIG_TRACE_EARLY_CALL_DEPTH_LIMIT;
	printf("trace: early enable at %08x\n", CONFIG_TRACE_EARLY_ADDR);

	trace_enabled = 1;

	return 0;
}
#endif