tracing: Use helper functions in event assignment to shrink macro size
[linux/fpc-iii.git] / kernel / trace / trace_output.c
blobee8d74840b88316c5d41aba0ccd0d931ebf2644d
1 /*
2 * trace_output.c
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
6 */
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
12 #include "trace_output.h"
14 /* must be a power of 2 */
15 #define EVENT_HASHSIZE 128
17 DECLARE_RWSEM(trace_event_sem);
19 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
21 static int next_event_type = __TRACE_LAST_TYPE + 1;
23 #define EVENT_STORAGE_SIZE 128
24 static DEFINE_MUTEX(event_storage_mutex);
25 static char event_storage[EVENT_STORAGE_SIZE];
27 int trace_print_seq(struct seq_file *m, struct trace_seq *s)
29 int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
30 int ret;
32 ret = seq_write(m, s->buffer, len);
35 * Only reset this buffer if we successfully wrote to the
36 * seq_file buffer.
38 if (!ret)
39 trace_seq_init(s);
41 return ret;
44 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
46 struct trace_seq *s = &iter->seq;
47 struct trace_entry *entry = iter->ent;
48 struct bputs_entry *field;
49 int ret;
51 trace_assign_type(field, entry);
53 ret = trace_seq_puts(s, field->str);
54 if (!ret)
55 return TRACE_TYPE_PARTIAL_LINE;
57 return TRACE_TYPE_HANDLED;
60 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
62 struct trace_seq *s = &iter->seq;
63 struct trace_entry *entry = iter->ent;
64 struct bprint_entry *field;
65 int ret;
67 trace_assign_type(field, entry);
69 ret = trace_seq_bprintf(s, field->fmt, field->buf);
70 if (!ret)
71 return TRACE_TYPE_PARTIAL_LINE;
73 return TRACE_TYPE_HANDLED;
76 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
78 struct trace_seq *s = &iter->seq;
79 struct trace_entry *entry = iter->ent;
80 struct print_entry *field;
81 int ret;
83 trace_assign_type(field, entry);
85 ret = trace_seq_puts(s, field->buf);
86 if (!ret)
87 return TRACE_TYPE_PARTIAL_LINE;
89 return TRACE_TYPE_HANDLED;
92 /**
93 * trace_seq_printf - sequence printing of trace information
94 * @s: trace sequence descriptor
95 * @fmt: printf format string
97 * It returns 0 if the trace oversizes the buffer's free
98 * space, 1 otherwise.
100 * The tracer may use either sequence operations or its own
101 * copy to user routines. To simplify formating of a trace
102 * trace_seq_printf is used to store strings into a special
103 * buffer (@s). Then the output may be either used by
104 * the sequencer or pulled into another buffer.
107 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
109 int len = (PAGE_SIZE - 1) - s->len;
110 va_list ap;
111 int ret;
113 if (s->full || !len)
114 return 0;
116 va_start(ap, fmt);
117 ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
118 va_end(ap);
120 /* If we can't write it all, don't bother writing anything */
121 if (ret >= len) {
122 s->full = 1;
123 return 0;
126 s->len += ret;
128 return 1;
130 EXPORT_SYMBOL_GPL(trace_seq_printf);
133 * trace_seq_vprintf - sequence printing of trace information
134 * @s: trace sequence descriptor
135 * @fmt: printf format string
137 * The tracer may use either sequence operations or its own
138 * copy to user routines. To simplify formating of a trace
139 * trace_seq_printf is used to store strings into a special
140 * buffer (@s). Then the output may be either used by
141 * the sequencer or pulled into another buffer.
144 trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
146 int len = (PAGE_SIZE - 1) - s->len;
147 int ret;
149 if (s->full || !len)
150 return 0;
152 ret = vsnprintf(s->buffer + s->len, len, fmt, args);
154 /* If we can't write it all, don't bother writing anything */
155 if (ret >= len) {
156 s->full = 1;
157 return 0;
160 s->len += ret;
162 return len;
164 EXPORT_SYMBOL_GPL(trace_seq_vprintf);
166 int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
168 int len = (PAGE_SIZE - 1) - s->len;
169 int ret;
171 if (s->full || !len)
172 return 0;
174 ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
176 /* If we can't write it all, don't bother writing anything */
177 if (ret >= len) {
178 s->full = 1;
179 return 0;
182 s->len += ret;
184 return len;
188 * trace_seq_puts - trace sequence printing of simple string
189 * @s: trace sequence descriptor
190 * @str: simple string to record
192 * The tracer may use either the sequence operations or its own
193 * copy to user routines. This function records a simple string
194 * into a special buffer (@s) for later retrieval by a sequencer
195 * or other mechanism.
197 int trace_seq_puts(struct trace_seq *s, const char *str)
199 int len = strlen(str);
201 if (s->full)
202 return 0;
204 if (len > ((PAGE_SIZE - 1) - s->len)) {
205 s->full = 1;
206 return 0;
209 memcpy(s->buffer + s->len, str, len);
210 s->len += len;
212 return len;
215 int trace_seq_putc(struct trace_seq *s, unsigned char c)
217 if (s->full)
218 return 0;
220 if (s->len >= (PAGE_SIZE - 1)) {
221 s->full = 1;
222 return 0;
225 s->buffer[s->len++] = c;
227 return 1;
229 EXPORT_SYMBOL(trace_seq_putc);
231 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
233 if (s->full)
234 return 0;
236 if (len > ((PAGE_SIZE - 1) - s->len)) {
237 s->full = 1;
238 return 0;
241 memcpy(s->buffer + s->len, mem, len);
242 s->len += len;
244 return len;
247 int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
249 unsigned char hex[HEX_CHARS];
250 const unsigned char *data = mem;
251 int i, j;
253 if (s->full)
254 return 0;
256 #ifdef __BIG_ENDIAN
257 for (i = 0, j = 0; i < len; i++) {
258 #else
259 for (i = len-1, j = 0; i >= 0; i--) {
260 #endif
261 hex[j++] = hex_asc_hi(data[i]);
262 hex[j++] = hex_asc_lo(data[i]);
264 hex[j++] = ' ';
266 return trace_seq_putmem(s, hex, j);
269 void *trace_seq_reserve(struct trace_seq *s, size_t len)
271 void *ret;
273 if (s->full)
274 return NULL;
276 if (len > ((PAGE_SIZE - 1) - s->len)) {
277 s->full = 1;
278 return NULL;
281 ret = s->buffer + s->len;
282 s->len += len;
284 return ret;
287 int trace_seq_path(struct trace_seq *s, const struct path *path)
289 unsigned char *p;
291 if (s->full)
292 return 0;
294 if (s->len >= (PAGE_SIZE - 1)) {
295 s->full = 1;
296 return 0;
299 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
300 if (!IS_ERR(p)) {
301 p = mangle_path(s->buffer + s->len, p, "\n");
302 if (p) {
303 s->len = p - s->buffer;
304 return 1;
306 } else {
307 s->buffer[s->len++] = '?';
308 return 1;
311 s->full = 1;
312 return 0;
315 const char *
316 ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
317 unsigned long flags,
318 const struct trace_print_flags *flag_array)
320 unsigned long mask;
321 const char *str;
322 const char *ret = p->buffer + p->len;
323 int i, first = 1;
325 for (i = 0; flag_array[i].name && flags; i++) {
327 mask = flag_array[i].mask;
328 if ((flags & mask) != mask)
329 continue;
331 str = flag_array[i].name;
332 flags &= ~mask;
333 if (!first && delim)
334 trace_seq_puts(p, delim);
335 else
336 first = 0;
337 trace_seq_puts(p, str);
340 /* check for left over flags */
341 if (flags) {
342 if (!first && delim)
343 trace_seq_puts(p, delim);
344 trace_seq_printf(p, "0x%lx", flags);
347 trace_seq_putc(p, 0);
349 return ret;
351 EXPORT_SYMBOL(ftrace_print_flags_seq);
353 const char *
354 ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
355 const struct trace_print_flags *symbol_array)
357 int i;
358 const char *ret = p->buffer + p->len;
360 for (i = 0; symbol_array[i].name; i++) {
362 if (val != symbol_array[i].mask)
363 continue;
365 trace_seq_puts(p, symbol_array[i].name);
366 break;
369 if (ret == (const char *)(p->buffer + p->len))
370 trace_seq_printf(p, "0x%lx", val);
372 trace_seq_putc(p, 0);
374 return ret;
376 EXPORT_SYMBOL(ftrace_print_symbols_seq);
#if BITS_PER_LONG == 32
/* 64-bit variant of ftrace_print_symbols_seq() for 32-bit kernels. */
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			     const struct trace_print_flags_u64 *symbol_array)
{
	const char *ret = p->buffer + p->len;
	int i;

	for (i = 0; symbol_array[i].name; i++) {
		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	/* nothing was appended: no symbol matched */
	if (ret == (const char *)(p->buffer + p->len))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif
405 const char *
406 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
408 int i;
409 const char *ret = p->buffer + p->len;
411 for (i = 0; i < buf_len; i++)
412 trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);
414 trace_seq_putc(p, 0);
416 return ret;
418 EXPORT_SYMBOL(ftrace_print_hex_seq);
420 int ftrace_raw_output_prep(struct trace_iterator *iter,
421 struct trace_event *trace_event)
423 struct ftrace_event_call *event;
424 struct trace_seq *s = &iter->seq;
425 struct trace_seq *p = &iter->tmp_seq;
426 struct trace_entry *entry;
427 int ret;
429 event = container_of(trace_event, struct ftrace_event_call, event);
430 entry = iter->ent;
432 if (entry->type != event->event.type) {
433 WARN_ON_ONCE(1);
434 return TRACE_TYPE_UNHANDLED;
437 trace_seq_init(p);
438 ret = trace_seq_printf(s, "%s: ", event->name);
439 if (!ret)
440 return TRACE_TYPE_PARTIAL_LINE;
442 return 0;
444 EXPORT_SYMBOL(ftrace_raw_output_prep);
446 static int ftrace_output_raw(struct trace_iterator *iter, char *name,
447 char *fmt, va_list ap)
449 struct trace_seq *s = &iter->seq;
450 int ret;
452 ret = trace_seq_printf(s, "%s: ", name);
453 if (!ret)
454 return TRACE_TYPE_PARTIAL_LINE;
456 ret = trace_seq_vprintf(s, fmt, ap);
458 if (!ret)
459 return TRACE_TYPE_PARTIAL_LINE;
461 return TRACE_TYPE_HANDLED;
464 int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
466 va_list ap;
467 int ret;
469 va_start(ap, fmt);
470 ret = ftrace_output_raw(iter, name, fmt, ap);
471 va_end(ap);
473 return ret;
475 EXPORT_SYMBOL_GPL(ftrace_output_call);
477 int ftrace_event_define_field(struct ftrace_event_call *call,
478 char *type, int len, char *item, int offset,
479 int field_size, int sign, int filter)
481 int ret;
483 mutex_lock(&event_storage_mutex);
484 snprintf(event_storage, sizeof(event_storage),
485 "%s[%d]", type, len);
486 ret = trace_define_field(call, event_storage, item, offset,
487 field_size, sign, filter);
488 mutex_unlock(&event_storage_mutex);
490 return ret;
492 EXPORT_SYMBOL_GPL(ftrace_event_define_field);
#ifdef CONFIG_KRETPROBES
/* Replace the kretprobe trampoline symbol with a readable marker. */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
/* Print the bare symbol name for @address (no offset). */
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
/* Print symbol name plus offset/size ("name+0x1c/0x40") for @address. */
static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
/* Fixed-width hex format for instruction pointers. */
#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif
549 int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
550 unsigned long ip, unsigned long sym_flags)
552 struct file *file = NULL;
553 unsigned long vmstart = 0;
554 int ret = 1;
556 if (s->full)
557 return 0;
559 if (mm) {
560 const struct vm_area_struct *vma;
562 down_read(&mm->mmap_sem);
563 vma = find_vma(mm, ip);
564 if (vma) {
565 file = vma->vm_file;
566 vmstart = vma->vm_start;
568 if (file) {
569 ret = trace_seq_path(s, &file->f_path);
570 if (ret)
571 ret = trace_seq_printf(s, "[+0x%lx]",
572 ip - vmstart);
574 up_read(&mm->mmap_sem);
576 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
577 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
578 return ret;
582 seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
583 unsigned long sym_flags)
585 struct mm_struct *mm = NULL;
586 int ret = 1;
587 unsigned int i;
589 if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
590 struct task_struct *task;
592 * we do the lookup on the thread group leader,
593 * since individual threads might have already quit!
595 rcu_read_lock();
596 task = find_task_by_vpid(entry->tgid);
597 if (task)
598 mm = get_task_mm(task);
599 rcu_read_unlock();
602 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
603 unsigned long ip = entry->caller[i];
605 if (ip == ULONG_MAX || !ret)
606 break;
607 if (ret)
608 ret = trace_seq_puts(s, " => ");
609 if (!ip) {
610 if (ret)
611 ret = trace_seq_puts(s, "??");
612 if (ret)
613 ret = trace_seq_putc(s, '\n');
614 continue;
616 if (!ret)
617 break;
618 if (ret)
619 ret = seq_print_user_ip(s, mm, ip, sym_flags);
620 ret = trace_seq_putc(s, '\n');
623 if (mm)
624 mmput(mm);
625 return ret;
629 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
631 int ret;
633 if (!ip)
634 return trace_seq_putc(s, '0');
636 if (sym_flags & TRACE_ITER_SYM_OFFSET)
637 ret = seq_print_sym_offset(s, "%s", ip);
638 else
639 ret = seq_print_sym_short(s, "%s", ip);
641 if (!ret)
642 return 0;
644 if (sym_flags & TRACE_ITER_SYM_ADDR)
645 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
646 return ret;
650 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
651 * @s: trace seq struct to write to
652 * @entry: The trace entry field from the ring buffer
654 * Prints the generic fields of irqs off, in hard or softirq, preempt
655 * count.
657 int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
659 char hardsoft_irq;
660 char need_resched;
661 char irqs_off;
662 int hardirq;
663 int softirq;
664 int ret;
666 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
667 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
669 irqs_off =
670 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
671 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
672 '.';
674 switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
675 TRACE_FLAG_PREEMPT_RESCHED)) {
676 case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
677 need_resched = 'N';
678 break;
679 case TRACE_FLAG_NEED_RESCHED:
680 need_resched = 'n';
681 break;
682 case TRACE_FLAG_PREEMPT_RESCHED:
683 need_resched = 'p';
684 break;
685 default:
686 need_resched = '.';
687 break;
690 hardsoft_irq =
691 (hardirq && softirq) ? 'H' :
692 hardirq ? 'h' :
693 softirq ? 's' :
694 '.';
696 if (!trace_seq_printf(s, "%c%c%c",
697 irqs_off, need_resched, hardsoft_irq))
698 return 0;
700 if (entry->preempt_count)
701 ret = trace_seq_printf(s, "%x", entry->preempt_count);
702 else
703 ret = trace_seq_putc(s, '.');
705 return ret;
708 static int
709 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
711 char comm[TASK_COMM_LEN];
713 trace_find_cmdline(entry->pid, comm);
715 if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
716 comm, entry->pid, cpu))
717 return 0;
719 return trace_print_lat_fmt(s, entry);
/* Deltas above this many microseconds are flagged with '!'. */
static unsigned long preempt_mark_thresh_us = 100;
724 static int
725 lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
727 unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
728 unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
729 unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
730 unsigned long long rel_ts = next_ts - iter->ts;
731 struct trace_seq *s = &iter->seq;
733 if (in_ns) {
734 abs_ts = ns2usecs(abs_ts);
735 rel_ts = ns2usecs(rel_ts);
738 if (verbose && in_ns) {
739 unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
740 unsigned long abs_msec = (unsigned long)abs_ts;
741 unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
742 unsigned long rel_msec = (unsigned long)rel_ts;
744 return trace_seq_printf(
745 s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
746 ns2usecs(iter->ts),
747 abs_msec, abs_usec,
748 rel_msec, rel_usec);
749 } else if (verbose && !in_ns) {
750 return trace_seq_printf(
751 s, "[%016llx] %lld (+%lld): ",
752 iter->ts, abs_ts, rel_ts);
753 } else if (!verbose && in_ns) {
754 return trace_seq_printf(
755 s, " %4lldus%c: ",
756 abs_ts,
757 rel_ts > preempt_mark_thresh_us ? '!' :
758 rel_ts > 1 ? '+' : ' ');
759 } else { /* !verbose && !in_ns */
760 return trace_seq_printf(s, " %4lld: ", abs_ts);
764 int trace_print_context(struct trace_iterator *iter)
766 struct trace_seq *s = &iter->seq;
767 struct trace_entry *entry = iter->ent;
768 unsigned long long t;
769 unsigned long secs, usec_rem;
770 char comm[TASK_COMM_LEN];
771 int ret;
773 trace_find_cmdline(entry->pid, comm);
775 ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
776 comm, entry->pid, iter->cpu);
777 if (!ret)
778 return 0;
780 if (trace_flags & TRACE_ITER_IRQ_INFO) {
781 ret = trace_print_lat_fmt(s, entry);
782 if (!ret)
783 return 0;
786 if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
787 t = ns2usecs(iter->ts);
788 usec_rem = do_div(t, USEC_PER_SEC);
789 secs = (unsigned long)t;
790 return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
791 } else
792 return trace_seq_printf(s, " %12llu: ", iter->ts);
795 int trace_print_lat_context(struct trace_iterator *iter)
797 u64 next_ts;
798 int ret;
799 /* trace_find_next_entry will reset ent_size */
800 int ent_size = iter->ent_size;
801 struct trace_seq *s = &iter->seq;
802 struct trace_entry *entry = iter->ent,
803 *next_entry = trace_find_next_entry(iter, NULL,
804 &next_ts);
805 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
807 /* Restore the original ent_size */
808 iter->ent_size = ent_size;
810 if (!next_entry)
811 next_ts = iter->ts;
813 if (verbose) {
814 char comm[TASK_COMM_LEN];
816 trace_find_cmdline(entry->pid, comm);
818 ret = trace_seq_printf(
819 s, "%16s %5d %3d %d %08x %08lx ",
820 comm, entry->pid, iter->cpu, entry->flags,
821 entry->preempt_count, iter->idx);
822 } else {
823 ret = lat_print_generic(s, entry, iter->cpu);
826 if (ret)
827 ret = lat_print_timestamp(iter, next_ts);
829 return ret;
832 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
834 static int task_state_char(unsigned long state)
836 int bit = state ? __ffs(state) + 1 : 0;
838 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
842 * ftrace_find_event - find a registered event
843 * @type: the type of event to look for
845 * Returns an event of type @type otherwise NULL
846 * Called with trace_event_read_lock() held.
848 struct trace_event *ftrace_find_event(int type)
850 struct trace_event *event;
851 unsigned key;
853 key = type & (EVENT_HASHSIZE - 1);
855 hlist_for_each_entry(event, &event_hash[key], node) {
856 if (event->type == type)
857 return event;
860 return NULL;
863 static LIST_HEAD(ftrace_event_list);
865 static int trace_search_list(struct list_head **list)
867 struct trace_event *e;
868 int last = __TRACE_LAST_TYPE;
870 if (list_empty(&ftrace_event_list)) {
871 *list = &ftrace_event_list;
872 return last + 1;
876 * We used up all possible max events,
877 * lets see if somebody freed one.
879 list_for_each_entry(e, &ftrace_event_list, list) {
880 if (e->type != last + 1)
881 break;
882 last++;
885 /* Did we used up all 65 thousand events??? */
886 if ((last + 1) > FTRACE_MAX_EVENT)
887 return 0;
889 *list = &e->list;
890 return last + 1;
893 void trace_event_read_lock(void)
895 down_read(&trace_event_sem);
898 void trace_event_read_unlock(void)
900 up_read(&trace_event_sem);
904 * register_ftrace_event - register output for an event type
905 * @event: the event type to register
907 * Event types are stored in a hash and this hash is used to
908 * find a way to print an event. If the @event->type is set
909 * then it will use that type, otherwise it will assign a
910 * type to use.
912 * If you assign your own type, please make sure it is added
913 * to the trace_type enum in trace.h, to avoid collisions
914 * with the dynamic types.
916 * Returns the event type number or zero on error.
918 int register_ftrace_event(struct trace_event *event)
920 unsigned key;
921 int ret = 0;
923 down_write(&trace_event_sem);
925 if (WARN_ON(!event))
926 goto out;
928 if (WARN_ON(!event->funcs))
929 goto out;
931 INIT_LIST_HEAD(&event->list);
933 if (!event->type) {
934 struct list_head *list = NULL;
936 if (next_event_type > FTRACE_MAX_EVENT) {
938 event->type = trace_search_list(&list);
939 if (!event->type)
940 goto out;
942 } else {
944 event->type = next_event_type++;
945 list = &ftrace_event_list;
948 if (WARN_ON(ftrace_find_event(event->type)))
949 goto out;
951 list_add_tail(&event->list, list);
953 } else if (event->type > __TRACE_LAST_TYPE) {
954 printk(KERN_WARNING "Need to add type to trace.h\n");
955 WARN_ON(1);
956 goto out;
957 } else {
958 /* Is this event already used */
959 if (ftrace_find_event(event->type))
960 goto out;
963 if (event->funcs->trace == NULL)
964 event->funcs->trace = trace_nop_print;
965 if (event->funcs->raw == NULL)
966 event->funcs->raw = trace_nop_print;
967 if (event->funcs->hex == NULL)
968 event->funcs->hex = trace_nop_print;
969 if (event->funcs->binary == NULL)
970 event->funcs->binary = trace_nop_print;
972 key = event->type & (EVENT_HASHSIZE - 1);
974 hlist_add_head(&event->node, &event_hash[key]);
976 ret = event->type;
977 out:
978 up_write(&trace_event_sem);
980 return ret;
982 EXPORT_SYMBOL_GPL(register_ftrace_event);
985 * Used by module code with the trace_event_sem held for write.
987 int __unregister_ftrace_event(struct trace_event *event)
989 hlist_del(&event->node);
990 list_del(&event->list);
991 return 0;
995 * unregister_ftrace_event - remove a no longer used event
996 * @event: the event to remove
998 int unregister_ftrace_event(struct trace_event *event)
1000 down_write(&trace_event_sem);
1001 __unregister_ftrace_event(event);
1002 up_write(&trace_event_sem);
1004 return 0;
1006 EXPORT_SYMBOL_GPL(unregister_ftrace_event);
1009 * Standard events
1012 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
1013 struct trace_event *event)
1015 if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
1016 return TRACE_TYPE_PARTIAL_LINE;
1018 return TRACE_TYPE_HANDLED;
1021 /* TRACE_FN */
1022 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
1023 struct trace_event *event)
1025 struct ftrace_entry *field;
1026 struct trace_seq *s = &iter->seq;
1028 trace_assign_type(field, iter->ent);
1030 if (!seq_print_ip_sym(s, field->ip, flags))
1031 goto partial;
1033 if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
1034 if (!trace_seq_puts(s, " <-"))
1035 goto partial;
1036 if (!seq_print_ip_sym(s,
1037 field->parent_ip,
1038 flags))
1039 goto partial;
1041 if (!trace_seq_putc(s, '\n'))
1042 goto partial;
1044 return TRACE_TYPE_HANDLED;
1046 partial:
1047 return TRACE_TYPE_PARTIAL_LINE;
1050 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
1051 struct trace_event *event)
1053 struct ftrace_entry *field;
1055 trace_assign_type(field, iter->ent);
1057 if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
1058 field->ip,
1059 field->parent_ip))
1060 return TRACE_TYPE_PARTIAL_LINE;
1062 return TRACE_TYPE_HANDLED;
1065 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
1066 struct trace_event *event)
1068 struct ftrace_entry *field;
1069 struct trace_seq *s = &iter->seq;
1071 trace_assign_type(field, iter->ent);
1073 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
1074 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
1076 return TRACE_TYPE_HANDLED;
1079 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
1080 struct trace_event *event)
1082 struct ftrace_entry *field;
1083 struct trace_seq *s = &iter->seq;
1085 trace_assign_type(field, iter->ent);
1087 SEQ_PUT_FIELD_RET(s, field->ip);
1088 SEQ_PUT_FIELD_RET(s, field->parent_ip);
1090 return TRACE_TYPE_HANDLED;
1093 static struct trace_event_functions trace_fn_funcs = {
1094 .trace = trace_fn_trace,
1095 .raw = trace_fn_raw,
1096 .hex = trace_fn_hex,
1097 .binary = trace_fn_bin,
1100 static struct trace_event trace_fn_event = {
1101 .type = TRACE_FN,
1102 .funcs = &trace_fn_funcs,
1105 /* TRACE_CTX an TRACE_WAKE */
1106 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
1107 char *delim)
1109 struct ctx_switch_entry *field;
1110 char comm[TASK_COMM_LEN];
1111 int S, T;
1114 trace_assign_type(field, iter->ent);
1116 T = task_state_char(field->next_state);
1117 S = task_state_char(field->prev_state);
1118 trace_find_cmdline(field->next_pid, comm);
1119 if (!trace_seq_printf(&iter->seq,
1120 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
1121 field->prev_pid,
1122 field->prev_prio,
1123 S, delim,
1124 field->next_cpu,
1125 field->next_pid,
1126 field->next_prio,
1127 T, comm))
1128 return TRACE_TYPE_PARTIAL_LINE;
1130 return TRACE_TYPE_HANDLED;
1133 static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
1134 struct trace_event *event)
1136 return trace_ctxwake_print(iter, "==>");
1139 static enum print_line_t trace_wake_print(struct trace_iterator *iter,
1140 int flags, struct trace_event *event)
1142 return trace_ctxwake_print(iter, " +");
1145 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
1147 struct ctx_switch_entry *field;
1148 int T;
1150 trace_assign_type(field, iter->ent);
1152 if (!S)
1153 S = task_state_char(field->prev_state);
1154 T = task_state_char(field->next_state);
1155 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
1156 field->prev_pid,
1157 field->prev_prio,
1159 field->next_cpu,
1160 field->next_pid,
1161 field->next_prio,
1163 return TRACE_TYPE_PARTIAL_LINE;
1165 return TRACE_TYPE_HANDLED;
1168 static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
1169 struct trace_event *event)
1171 return trace_ctxwake_raw(iter, 0);
1174 static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
1175 struct trace_event *event)
1177 return trace_ctxwake_raw(iter, '+');
1181 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
1183 struct ctx_switch_entry *field;
1184 struct trace_seq *s = &iter->seq;
1185 int T;
1187 trace_assign_type(field, iter->ent);
1189 if (!S)
1190 S = task_state_char(field->prev_state);
1191 T = task_state_char(field->next_state);
1193 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1194 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
1195 SEQ_PUT_HEX_FIELD_RET(s, S);
1196 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
1197 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
1198 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
1199 SEQ_PUT_HEX_FIELD_RET(s, T);
1201 return TRACE_TYPE_HANDLED;
1204 static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
1205 struct trace_event *event)
1207 return trace_ctxwake_hex(iter, 0);
1210 static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
1211 struct trace_event *event)
1213 return trace_ctxwake_hex(iter, '+');
1216 static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
1217 int flags, struct trace_event *event)
1219 struct ctx_switch_entry *field;
1220 struct trace_seq *s = &iter->seq;
1222 trace_assign_type(field, iter->ent);
1224 SEQ_PUT_FIELD_RET(s, field->prev_pid);
1225 SEQ_PUT_FIELD_RET(s, field->prev_prio);
1226 SEQ_PUT_FIELD_RET(s, field->prev_state);
1227 SEQ_PUT_FIELD_RET(s, field->next_pid);
1228 SEQ_PUT_FIELD_RET(s, field->next_prio);
1229 SEQ_PUT_FIELD_RET(s, field->next_state);
1231 return TRACE_TYPE_HANDLED;
1234 static struct trace_event_functions trace_ctx_funcs = {
1235 .trace = trace_ctx_print,
1236 .raw = trace_ctx_raw,
1237 .hex = trace_ctx_hex,
1238 .binary = trace_ctxwake_bin,
1241 static struct trace_event trace_ctx_event = {
1242 .type = TRACE_CTX,
1243 .funcs = &trace_ctx_funcs,
1246 static struct trace_event_functions trace_wake_funcs = {
1247 .trace = trace_wake_print,
1248 .raw = trace_wake_raw,
1249 .hex = trace_wake_hex,
1250 .binary = trace_ctxwake_bin,
1253 static struct trace_event trace_wake_event = {
1254 .type = TRACE_WAKE,
1255 .funcs = &trace_wake_funcs,
1258 /* TRACE_STACK */
1260 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1261 int flags, struct trace_event *event)
1263 struct stack_entry *field;
1264 struct trace_seq *s = &iter->seq;
1265 unsigned long *p;
1266 unsigned long *end;
1268 trace_assign_type(field, iter->ent);
1269 end = (unsigned long *)((long)iter->ent + iter->ent_size);
1271 if (!trace_seq_puts(s, "<stack trace>\n"))
1272 goto partial;
1274 for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
1275 if (!trace_seq_puts(s, " => "))
1276 goto partial;
1278 if (!seq_print_ip_sym(s, *p, flags))
1279 goto partial;
1280 if (!trace_seq_putc(s, '\n'))
1281 goto partial;
1284 return TRACE_TYPE_HANDLED;
1286 partial:
1287 return TRACE_TYPE_PARTIAL_LINE;
1290 static struct trace_event_functions trace_stack_funcs = {
1291 .trace = trace_stack_print,
1294 static struct trace_event trace_stack_event = {
1295 .type = TRACE_STACK,
1296 .funcs = &trace_stack_funcs,
1299 /* TRACE_USER_STACK */
1300 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1301 int flags, struct trace_event *event)
1303 struct userstack_entry *field;
1304 struct trace_seq *s = &iter->seq;
1306 trace_assign_type(field, iter->ent);
1308 if (!trace_seq_puts(s, "<user stack trace>\n"))
1309 goto partial;
1311 if (!seq_print_userip_objs(field, s, flags))
1312 goto partial;
1314 return TRACE_TYPE_HANDLED;
1316 partial:
1317 return TRACE_TYPE_PARTIAL_LINE;
1320 static struct trace_event_functions trace_user_stack_funcs = {
1321 .trace = trace_user_stack_print,
1324 static struct trace_event trace_user_stack_event = {
1325 .type = TRACE_USER_STACK,
1326 .funcs = &trace_user_stack_funcs,
1329 /* TRACE_BPUTS */
1330 static enum print_line_t
1331 trace_bputs_print(struct trace_iterator *iter, int flags,
1332 struct trace_event *event)
1334 struct trace_entry *entry = iter->ent;
1335 struct trace_seq *s = &iter->seq;
1336 struct bputs_entry *field;
1338 trace_assign_type(field, entry);
1340 if (!seq_print_ip_sym(s, field->ip, flags))
1341 goto partial;
1343 if (!trace_seq_puts(s, ": "))
1344 goto partial;
1346 if (!trace_seq_puts(s, field->str))
1347 goto partial;
1349 return TRACE_TYPE_HANDLED;
1351 partial:
1352 return TRACE_TYPE_PARTIAL_LINE;
1356 static enum print_line_t
1357 trace_bputs_raw(struct trace_iterator *iter, int flags,
1358 struct trace_event *event)
1360 struct bputs_entry *field;
1361 struct trace_seq *s = &iter->seq;
1363 trace_assign_type(field, iter->ent);
1365 if (!trace_seq_printf(s, ": %lx : ", field->ip))
1366 goto partial;
1368 if (!trace_seq_puts(s, field->str))
1369 goto partial;
1371 return TRACE_TYPE_HANDLED;
1373 partial:
1374 return TRACE_TYPE_PARTIAL_LINE;
1377 static struct trace_event_functions trace_bputs_funcs = {
1378 .trace = trace_bputs_print,
1379 .raw = trace_bputs_raw,
1382 static struct trace_event trace_bputs_event = {
1383 .type = TRACE_BPUTS,
1384 .funcs = &trace_bputs_funcs,
1387 /* TRACE_BPRINT */
1388 static enum print_line_t
1389 trace_bprint_print(struct trace_iterator *iter, int flags,
1390 struct trace_event *event)
1392 struct trace_entry *entry = iter->ent;
1393 struct trace_seq *s = &iter->seq;
1394 struct bprint_entry *field;
1396 trace_assign_type(field, entry);
1398 if (!seq_print_ip_sym(s, field->ip, flags))
1399 goto partial;
1401 if (!trace_seq_puts(s, ": "))
1402 goto partial;
1404 if (!trace_seq_bprintf(s, field->fmt, field->buf))
1405 goto partial;
1407 return TRACE_TYPE_HANDLED;
1409 partial:
1410 return TRACE_TYPE_PARTIAL_LINE;
1414 static enum print_line_t
1415 trace_bprint_raw(struct trace_iterator *iter, int flags,
1416 struct trace_event *event)
1418 struct bprint_entry *field;
1419 struct trace_seq *s = &iter->seq;
1421 trace_assign_type(field, iter->ent);
1423 if (!trace_seq_printf(s, ": %lx : ", field->ip))
1424 goto partial;
1426 if (!trace_seq_bprintf(s, field->fmt, field->buf))
1427 goto partial;
1429 return TRACE_TYPE_HANDLED;
1431 partial:
1432 return TRACE_TYPE_PARTIAL_LINE;
1435 static struct trace_event_functions trace_bprint_funcs = {
1436 .trace = trace_bprint_print,
1437 .raw = trace_bprint_raw,
1440 static struct trace_event trace_bprint_event = {
1441 .type = TRACE_BPRINT,
1442 .funcs = &trace_bprint_funcs,
1445 /* TRACE_PRINT */
1446 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1447 int flags, struct trace_event *event)
1449 struct print_entry *field;
1450 struct trace_seq *s = &iter->seq;
1452 trace_assign_type(field, iter->ent);
1454 if (!seq_print_ip_sym(s, field->ip, flags))
1455 goto partial;
1457 if (!trace_seq_printf(s, ": %s", field->buf))
1458 goto partial;
1460 return TRACE_TYPE_HANDLED;
1462 partial:
1463 return TRACE_TYPE_PARTIAL_LINE;
1466 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
1467 struct trace_event *event)
1469 struct print_entry *field;
1471 trace_assign_type(field, iter->ent);
1473 if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
1474 goto partial;
1476 return TRACE_TYPE_HANDLED;
1478 partial:
1479 return TRACE_TYPE_PARTIAL_LINE;
1482 static struct trace_event_functions trace_print_funcs = {
1483 .trace = trace_print_print,
1484 .raw = trace_print_raw,
1487 static struct trace_event trace_print_event = {
1488 .type = TRACE_PRINT,
1489 .funcs = &trace_print_funcs,
1493 static struct trace_event *events[] __initdata = {
1494 &trace_fn_event,
1495 &trace_ctx_event,
1496 &trace_wake_event,
1497 &trace_stack_event,
1498 &trace_user_stack_event,
1499 &trace_bputs_event,
1500 &trace_bprint_event,
1501 &trace_print_event,
1502 NULL
1505 __init static int init_events(void)
1507 struct trace_event *event;
1508 int i, ret;
1510 for (i = 0; events[i]; i++) {
1511 event = events[i];
1513 ret = register_ftrace_event(event);
1514 if (!ret) {
1515 printk(KERN_WARNING "event %d failed to register\n",
1516 event->type);
1517 WARN_ON_ONCE(1);
1521 return 0;
1523 early_initcall(init_events);