/* [linux-ginger.git] include/trace/ftrace.h */
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name
#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
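/*
 * For illustration only (not part of the original header): a hypothetical
 * event definition such as
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int req, const char *name),
 *		TP_ARGS(req, name),
 *		TP_STRUCT__entry(
 *			__field(	int,	req	)
 *			__string(	name,	name	)
 *		),
 *		TP_fast_assign(
 *			__entry->req = req;
 *			__assign_str(name, name);
 *		),
 *		TP_printk("req=%d name=%s", __entry->req, __get_str(name))
 *	);
 *
 * would expand at this stage to roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			req;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 */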
/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>, this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
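/*
 * Continuing the hypothetical foo_bar event from the stage 1 comment, this
 * stage would produce (illustration only):
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 */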
/*
 * Set up the output format of the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 * }
 */
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\n",		       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
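/*
 * For the hypothetical foo_bar event, the generated ftrace_format_foo_bar()
 * would emit text along these lines into the event's "format" file
 * (illustrative only; the actual offsets depend on struct trace_entry and
 * padding):
 *
 *	field:int req;	offset:12;	size:4;
 *	field:__data_loc char[] name;	offset:16;	size:4;
 *
 *	print fmt: "req=%d name=%s", REC->req, __get_str(name)
 */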
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
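/*
 * __print_flags() is typically used from a TP_printk(), e.g. (hypothetical
 * flag values and names, illustration only):
 *
 *	TP_printk("flags=%s",
 *		  __print_flags(__entry->flags, "|",
 *				{ 1, "LOW" }, { 2, "HIGH" }))
 *
 * The ({ ... }) statement expression builds the trace_print_flags table
 * once (it is static) and hands it, together with the per-cpu trace_seq
 * 'p' set up in ftrace_raw_output_<call>(), to ftrace_print_flags_seq().
 */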
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
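/*
 * For the hypothetical foo_bar event, ftrace_define_fields_foo_bar() would
 * boil down to calls such as (illustration only):
 *
 *	trace_define_field(event_call, "int", "req",
 *			   offsetof(struct ftrace_raw_foo_bar, req),
 *			   sizeof(field.req), is_signed_type(int),
 *			   FILTER_OTHER);
 *
 * which is what makes "req" usable in event filter expressions.
 */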
/*
 * remember the offset of each array from the beginning of the event.
 */
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
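/*
 * Worked example of the encoding (illustration only): for the foo_bar
 * event with name = "tty0" (strlen + 1 == 5) and, say,
 * offsetof(struct ftrace_raw_foo_bar, __data) == 16:
 *
 *	__data_offsets.name = 16 | (5 << 16);
 *
 * The low 16 bits carry the offset from the start of the entry, the high
 * 16 bits the length, matching the (& 0xffff) decoding done by
 * __get_dynamic_array() in stage 3.
 */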
#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(void)				\
{									\
	return register_trace_##call(ftrace_profile_##call);		\
}									\
									\
static void ftrace_profile_disable_##call(void)				\
{									\
	unregister_trace_##call(ftrace_profile_##call);			\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif
/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *		       __array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_raw_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_raw_reg_event_<call>,
 *	.unregfunc		= ftrace_raw_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * };
 */
#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_##call.id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void *ptr)			\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void *ptr)			\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,		\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
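/*
 * As a reminder (illustrative skeleton, not part of the original header),
 * a trace header that feeds these stages typically looks like:
 *
 *	#undef TRACE_SYSTEM
 *	#define TRACE_SYSTEM foo
 *
 *	#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
 *	#define _TRACE_FOO_H
 *
 *	#include <linux/tracepoint.h>
 *
 *	TRACE_EVENT(foo_bar, ...);
 *
 *	#endif
 *
 *	This part must be outside the multi-read protection:
 *	#include <trace/define_trace.h>
 */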
/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	char *raw_data;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non-nmi buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		raw_data = rcu_dereference(trace_profile_buf_nmi);
 *	else
 *		raw_data = rcu_dereference(trace_profile_buf);
 *
 *	if (!raw_data)
 *		goto end;
 *
 *	raw_data = per_cpu_ptr(raw_data, __cpu);
 *
 *	// zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- compute the dynamic array offsets
 *
 *	<assign>  <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size); <- submit them to the perf counter
 *
 * end:
 *	local_irq_restore(irq_flags);
 * }
 */
#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *raw_data;							\
	int __cpu;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
	else								\
		raw_data = rcu_dereference(trace_profile_buf);		\
									\
	if (!raw_data)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(raw_data, __cpu);			\
									\
	/* zero the dead bytes from alignment to not leak stack to user */ \
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
									\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
		      __entry_size);					\
									\
end:									\
	local_irq_restore(irq_flags);					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
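/*
 * Worked example of the __entry_size computation above (illustration only):
 * with sizeof(*entry) == 16 and __data_size == 5,
 *
 *	ALIGN(5 + 16 + 4, 8) - 4 == ALIGN(25, 8) - 4 == 32 - 4 == 28
 *
 * i.e. the record is sized so that, once perf prepends its u32 size field,
 * the total stays u64 aligned.
 */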
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT