/*
 * CTF writing support via babeltrace.
 *
 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <errno.h>
#include <inttypes.h>
#include <linux/compiler.h>
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-writer/clock.h>
#include <babeltrace/ctf-writer/stream.h>
#include <babeltrace/ctf-writer/event.h>
#include <babeltrace/ctf-writer/event-types.h>
#include <babeltrace/ctf-writer/event-fields.h>
#include <babeltrace/ctf-ir/utils.h>
#include <babeltrace/ctf/events.h>
#include <traceevent/event-parse.h>
#include "asm/bug.h"
#include "data-convert-bt.h"
#include "session.h"
#include "util.h"
#include "debug.h"
#include "tool.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "config.h"
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};

#define MAX_CPUS	4096

struct ctf_stream {
	struct bt_ctf_stream	*stream;
	int			 cpu;
	u32			 count;
};

struct ctf_writer {
	/* writer primitives */
	struct bt_ctf_writer		 *writer;
	struct ctf_stream		**stream;
	int				  stream_cnt;
	struct bt_ctf_stream_class	 *stream_class;
	struct bt_ctf_clock		 *clock;

	/* data types */
	union {
		struct {
			struct bt_ctf_field_type	*s64;
			struct bt_ctf_field_type	*u64;
			struct bt_ctf_field_type	*s32;
			struct bt_ctf_field_type	*u32;
			struct bt_ctf_field_type	*string;
			struct bt_ctf_field_type	*u32_hex;
			struct bt_ctf_field_type	*u64_hex;
		};
		/* must cover every named type above so cleanup puts them all */
		struct bt_ctf_field_type *array[7];
	} data;
	struct bt_ctf_event_class	*comm_class;
	struct bt_ctf_event_class	*exit_class;
	struct bt_ctf_event_class	*fork_class;
};

struct convert {
	struct perf_tool	tool;
	struct ctf_writer	writer;

	u64			events_size;
	u64			events_count;
	u64			non_sample_count;

	/* Ordered events configured queue size. */
	u64			queue_size;
};
static int value_set(struct bt_ctf_field_type *type,
		     struct bt_ctf_event *event,
		     const char *name, u64 val)
{
	struct bt_ctf_field *field;
	bool sign = bt_ctf_field_type_integer_get_signed(type);
	int ret;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	if (sign) {
		ret = bt_ctf_field_signed_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	} else {
		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret) {
		pr_err("failed to set payload %s\n", name);
		goto err;
	}

	pr2("  SET [%s = %" PRIu64 "]\n", name, val);

err:
	bt_ctf_field_put(field);
	return ret;
}
#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
			     struct bt_ctf_event *event,		\
			     const char *name,				\
			     _val_type val)				\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)
static int string_set_value(struct bt_ctf_field *field, const char *string);
static __maybe_unused int
value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
		 const char *name, const char *string)
{
	struct bt_ctf_field_type *type = cw->data.string;
	struct bt_ctf_field *field;
	int ret = 0;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	ret = string_set_value(field, string);
	if (ret) {
		pr_err("failed to set value %s\n", name);
		goto err_put_field;
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret)
		pr_err("failed to set payload %s\n", name);

err_put_field:
	bt_ctf_field_put(field);
	return ret;
}
static struct bt_ctf_field_type*
get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
{
	unsigned long flags = field->flags;

	if (flags & FIELD_IS_STRING)
		return cw->data.string;

	if (!(flags & FIELD_IS_SIGNED)) {
		/* unsigned long are mostly pointers */
		if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
			return cw->data.u64_hex;
	}

	if (flags & FIELD_IS_SIGNED) {
		if (field->size == 8)
			return cw->data.s64;
		else
			return cw->data.s32;
	}

	if (field->size == 8)
		return cw->data.u64;
	else
		return cw->data.u32;
}
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long value_mask;

	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Directly set value_mask for code readers.
	 */
	switch (size) {
	case 1:
		value_mask = 0x7fULL;
		break;
	case 2:
		value_mask = 0x7fffULL;
		break;
	case 4:
		value_mask = 0x7fffffffULL;
		break;
	case 8:
	default:
		/*
		 * For a 64 bit value, return it as is. There is no need
		 * to fill its upper part.
		 */
		return value_int;
	}

	/* If it is a positive value, don't adjust. */
	if ((value_int & (~0ULL - value_mask)) == 0)
		return value_int;

	/* Fill upper part of value_int with 1 to make it a negative long long. */
	return (value_int & value_mask) | ~value_mask;
}
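/*
 * Worked example for adjust_signedness(): for a one-byte field holding
 * 0xff, value_mask is 0x7fULL and the sign bit is set, so the result is
 * (0xff & 0x7f) | ~0x7fULL == 0xffffffffffffffffULL, i.e. -1 once it is
 * stored through the signed babeltrace setter.
 */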
static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;
	size_t len = strlen(string), i, p;
	int err;

	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;
			buffer[p] = string[i];
		} else {
			char numstr[5];

			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				if (i > 0)
					strncpy(buffer, string, i);
			}
			strncat(buffer + p, numstr, 4);
			p += 3;
		}
	}

	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}
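/*
 * For example, a raw string "irq/33\x01" ends up stored as "irq/33\\x01":
 * printable bytes are copied through unchanged, while every unprintable
 * byte expands to a four-character "\xNN" escape in the scratch buffer.
 */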
static int add_tracepoint_field_value(struct ctf_writer *cw,
				      struct bt_ctf_event_class *event_class,
				      struct bt_ctf_event *event,
				      struct perf_sample *sample,
				      struct format_field *fmtf)
{
	struct bt_ctf_field_type *type;
	struct bt_ctf_field *array_field;
	struct bt_ctf_field *field;
	const char *name = fmtf->name;
	void *data = sample->raw_data;
	unsigned long flags = fmtf->flags;
	unsigned int n_items;
	unsigned int i;
	unsigned int offset;
	unsigned int len;
	int ret;

	offset = fmtf->offset;
	len    = fmtf->size;

	/* A string is an array of chars; treat it as a single field. */
	if (flags & FIELD_IS_STRING)
		flags &= ~FIELD_IS_ARRAY;

	if (flags & FIELD_IS_DYNAMIC) {
		unsigned long long tmp_val;

		tmp_val = pevent_read_number(fmtf->event->pevent,
					     data + offset, len);
		offset = tmp_val;
		len = offset >> 16;
		offset &= 0xffff;
	}

	if (flags & FIELD_IS_ARRAY) {

		type = bt_ctf_event_class_get_field_by_name(
				event_class, name);
		array_field = bt_ctf_field_create(type);
		bt_ctf_field_type_put(type);
		if (!array_field) {
			pr_err("Failed to create array type %s\n", name);
			return -1;
		}

		len = fmtf->size / fmtf->arraylen;
		n_items = fmtf->arraylen;
	} else {
		n_items = 1;
		array_field = NULL;
	}

	type = get_tracepoint_field_type(cw, fmtf);

	for (i = 0; i < n_items; i++) {
		if (flags & FIELD_IS_ARRAY)
			field = bt_ctf_field_array_get_field(array_field, i);
		else
			field = bt_ctf_field_create(type);

		if (!field) {
			pr_err("failed to create a field %s\n", name);
			return -1;
		}

		if (flags & FIELD_IS_STRING)
			ret = string_set_value(field, data + offset + i * len);
		else {
			unsigned long long value_int;

			value_int = pevent_read_number(
					fmtf->event->pevent,
					data + offset + i * len, len);

			if (!(flags & FIELD_IS_SIGNED))
				ret = bt_ctf_field_unsigned_integer_set_value(
						field, value_int);
			else
				ret = bt_ctf_field_signed_integer_set_value(
						field, adjust_signedness(value_int, len));
		}

		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err_put_field;
		}
		if (!(flags & FIELD_IS_ARRAY)) {
			ret = bt_ctf_event_set_payload(event, name, field);
			if (ret) {
				pr_err("failed to set payload %s\n", name);
				goto err_put_field;
			}
		}
		bt_ctf_field_put(field);
	}
	if (flags & FIELD_IS_ARRAY) {
		ret = bt_ctf_event_set_payload(event, name, array_field);
		if (ret) {
			pr_err("Failed to add payload array %s\n", name);
			return -1;
		}
		bt_ctf_field_put(array_field);
	}
	return 0;

err_put_field:
	bt_ctf_field_put(field);
	return -1;
}
static int add_tracepoint_fields_values(struct ctf_writer *cw,
					struct bt_ctf_event_class *event_class,
					struct bt_ctf_event *event,
					struct format_field *fields,
					struct perf_sample *sample)
{
	struct format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		ret = add_tracepoint_field_value(cw, event_class, event, sample,
						 field);
		if (ret)
			return -1;
	}
	return 0;
}
static int add_tracepoint_values(struct ctf_writer *cw,
				 struct bt_ctf_event_class *event_class,
				 struct bt_ctf_event *event,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample)
{
	struct format_field *common_fields = evsel->tp_format->format.common_fields;
	struct format_field *fields        = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_values(cw, event_class, event,
					   common_fields, sample);
	if (!ret)
		ret = add_tracepoint_fields_values(cw, event_class, event,
						   fields, sample);

	return ret;
}
static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
		      struct bt_ctf_event *event,
		      struct perf_sample *sample)
{
	struct bt_ctf_field_type *len_type, *seq_type;
	struct bt_ctf_field *len_field, *seq_field;
	unsigned int raw_size = sample->raw_size;
	unsigned int nr_elements = raw_size / sizeof(u32);
	unsigned int i;
	int ret;

	if (nr_elements * sizeof(u32) != raw_size)
		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
			   raw_size, nr_elements * sizeof(u32) - raw_size);

	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
	len_field = bt_ctf_field_create(len_type);
	if (!len_field) {
		pr_err("failed to create 'raw_len' for bpf output event\n");
		ret = -1;
		goto put_len_type;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
	if (ret) {
		pr_err("failed to set field value for raw_len\n");
		goto put_len_field;
	}
	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
	if (ret) {
		pr_err("failed to set payload to raw_len\n");
		goto put_len_field;
	}

	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
	seq_field = bt_ctf_field_create(seq_type);
	if (!seq_field) {
		pr_err("failed to create 'raw_data' for bpf output event\n");
		ret = -1;
		goto put_seq_type;
	}

	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
	if (ret) {
		pr_err("failed to set length of 'raw_data'\n");
		goto put_seq_field;
	}

	for (i = 0; i < nr_elements; i++) {
		struct bt_ctf_field *elem_field =
			bt_ctf_field_sequence_get_field(seq_field, i);

		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
				((u32 *)(sample->raw_data))[i]);

		bt_ctf_field_put(elem_field);
		if (ret) {
			pr_err("failed to set raw_data[%d]\n", i);
			goto put_seq_field;
		}
	}

	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
	if (ret)
		pr_err("failed to set payload for raw_data\n");

put_seq_field:
	bt_ctf_field_put(seq_field);
put_seq_type:
	bt_ctf_field_type_put(seq_type);
put_len_field:
	bt_ctf_field_put(len_field);
put_len_type:
	bt_ctf_field_type_put(len_type);
	return ret;
}
static int add_generic_values(struct ctf_writer *cw,
			      struct bt_ctf_event *event,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	int ret;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              the CTF event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

	if (type & PERF_SAMPLE_IP) {
		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TID) {
		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
		if (ret)
			return -1;

		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
		if (ret)
			return -1;
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER)) {
		ret = value_set_u64(cw, event, "perf_id", sample->id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		ret = value_set_u64(cw, event, "perf_period", sample->period);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		ret = value_set_u64(cw, event, "perf_data_src",
				    sample->data_src);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		ret = value_set_u64(cw, event, "perf_transaction",
				    sample->transaction);
		if (ret)
			return -1;
	}

	return 0;
}
static int ctf_stream__flush(struct ctf_stream *cs)
{
	int err = 0;

	if (cs) {
		err = bt_ctf_stream_flush(cs->stream);
		if (err)
			pr_err("CTF stream %d flush failed\n", cs->cpu);

		pr("Flush stream for cpu %d (%u samples)\n",
		   cs->cpu, cs->count);

		cs->count = 0;
	}

	return err;
}
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx   = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream   = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu    = cpu;
	cs->stream = stream;
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}
static void ctf_stream__delete(struct ctf_stream *cs)
{
	if (cs) {
		bt_ctf_stream_put(cs->stream);
		free(cs);
	}
}
static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs = cw->stream[cpu];

	if (!cs) {
		cs = ctf_stream__create(cw, cpu);
		cw->stream[cpu] = cs;
	}

	return cs;
}
static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
			  struct perf_evsel *evsel)
{
	int cpu = 0;

	if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
		cpu = sample->cpu;

	if (cpu >= cw->stream_cnt) {
		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
		       cpu, cw->stream_cnt);
		cpu = 0;
	}

	return cpu;
}
#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine the
 * time for the stream flush other than keep track
 * of the number of events and check it against
 * the threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	return cs->count >= STREAM_FLUSH_COUNT;
}
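/*
 * cs->count is bumped for every event appended in process_sample_event()
 * and in the non-sample handlers, and it is reset in ctf_stream__flush(),
 * so each per-cpu stream is flushed roughly every STREAM_FLUSH_COUNT
 * appended events.
 */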
static int process_sample_event(struct perf_tool *tool,
				union perf_event *_event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine __maybe_unused)
{
	struct convert *c = container_of(tool, struct convert, tool);
	struct evsel_priv *priv = evsel->priv;
	struct ctf_writer *cw = &c->writer;
	struct ctf_stream *cs;
	struct bt_ctf_event_class *event_class;
	struct bt_ctf_event *event;
	int ret;

	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
		return 0;

	event_class = priv->event_class;

	/* update stats */
	c->events_count++;
	c->events_size += _event->header.size;

	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);

	event = bt_ctf_event_create(event_class);
	if (!event) {
		pr_err("Failed to create a CTF event\n");
		return -1;
	}

	bt_ctf_clock_set_time(cw->clock, sample->time);

	ret = add_generic_values(cw, event, evsel, sample);
	if (ret)
		return -1;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_values(cw, event_class, event,
					    evsel, sample);
		if (ret)
			return -1;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_values(event_class, event, sample);
		if (ret)
			return -1;
	}

	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
	if (cs) {
		if (is_flush_needed(cs))
			ctf_stream__flush(cs);

		cs->count++;
		bt_ctf_stream_append_event(cs->stream, event);
	}

	bt_ctf_event_put(event);
	return cs ? 0 : -1;
}
#define __NON_SAMPLE_SET_FIELD(_name, _type, _field)			\
do {									\
	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
	if (ret)							\
		return -1;						\
} while (0)

#define __FUNC_PROCESS_NON_SAMPLE(_name, body)				\
static int process_##_name##_event(struct perf_tool *tool,		\
				   union perf_event *_event,		\
				   struct perf_sample *sample,		\
				   struct machine *machine)		\
{									\
	struct convert *c = container_of(tool, struct convert, tool);	\
	struct ctf_writer *cw = &c->writer;				\
	struct bt_ctf_event_class *event_class = cw->_name##_class;	\
	struct bt_ctf_event *event;					\
	struct ctf_stream *cs;						\
	int ret;							\
									\
	c->non_sample_count++;						\
	c->events_size += _event->header.size;				\
	event = bt_ctf_event_create(event_class);			\
	if (!event) {							\
		pr_err("Failed to create a CTF event\n");		\
		return -1;						\
	}								\
									\
	bt_ctf_clock_set_time(cw->clock, sample->time);			\
	body								\
	cs = ctf_stream(cw, 0);						\
	if (cs) {							\
		if (is_flush_needed(cs))				\
			ctf_stream__flush(cs);				\
									\
		cs->count++;						\
		bt_ctf_stream_append_event(cs->stream, event);		\
	}								\
	bt_ctf_event_put(event);					\
									\
	return perf_event__process_##_name(tool, _event, sample, machine);\
}

__FUNC_PROCESS_NON_SAMPLE(comm,
	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
	__NON_SAMPLE_SET_FIELD(comm, string, comm);
)

__FUNC_PROCESS_NON_SAMPLE(fork,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

/* PERF_RECORD_EXIT reuses the fork event layout. */
__FUNC_PROCESS_NON_SAMPLE(exit,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE
/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t len;

	if (!name)
		name = orig_name;

	if (dup >= 10)
		goto out;
	/*
	 * Add '_' prefix to potential keyword. According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * further CTF spec updating may require us to use '$'.
	 */
	if (dup < 0)
		len = strlen(name) + sizeof("_");
	else
		len = strlen(orig_name) + sizeof("_dupl_X");

	new_name = malloc(len);
	if (!new_name)
		goto out;

	if (dup < 0)
		snprintf(new_name, len, "_%s", name);
	else
		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
	if (name != orig_name)
		free(name);
	return new_name;
}
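/*
 * For example, a tracepoint field whose name collides with a CTF keyword
 * (such as "event") is renamed to "_event"; if even that name is already
 * taken in the event class, subsequent calls produce "event_dupl_1",
 * "event_dupl_2", and so on.
 */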
static int event_class_add_field(struct bt_ctf_event_class *event_class,
				 struct bt_ctf_field_type *type,
				 struct format_field *field)
{
	struct bt_ctf_field_type *t = NULL;
	char *name;
	int dup = 1;
	int ret;

	/* alias was already assigned */
	if (field->alias != field->name)
		return bt_ctf_event_class_add_field(event_class, type,
						    (char *)field->alias);

	name = field->name;

	/* If 'name' is a keyword, add prefix. */
	if (bt_ctf_validate_identifier(name))
		name = change_name(name, field->name, -1);

	if (!name) {
		pr_err("Failed to fix invalid identifier.\n");
		return -1;
	}
	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
		bt_ctf_field_type_put(t);
		name = change_name(name, field->name, dup++);
		if (!name) {
			pr_err("Failed to create dup name for '%s'\n", field->name);
			return -1;
		}
	}

	ret = bt_ctf_event_class_add_field(event_class, type, name);
	if (name != field->name)
		field->alias = name;

	return ret;
}
static int add_tracepoint_fields_types(struct ctf_writer *cw,
				       struct format_field *fields,
				       struct bt_ctf_event_class *event_class)
{
	struct format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		struct bt_ctf_field_type *type;
		unsigned long flags = field->flags;

		pr2("  field '%s'\n", field->name);

		type = get_tracepoint_field_type(cw, field);
		if (!type)
			return -1;

		/*
		 * A string is an array of chars. For this we use the string
		 * type and don't care that it is an array. What we don't
		 * support is an array of strings.
		 */
		if (flags & FIELD_IS_STRING)
			flags &= ~FIELD_IS_ARRAY;

		if (flags & FIELD_IS_ARRAY)
			type = bt_ctf_field_type_array_create(type, field->arraylen);

		ret = event_class_add_field(event_class, type, field);

		if (flags & FIELD_IS_ARRAY)
			bt_ctf_field_type_put(type);

		if (ret) {
			pr_err("Failed to add field '%s': %d\n",
			       field->name, ret);
			return -1;
		}
	}

	return 0;
}
static int add_tracepoint_types(struct ctf_writer *cw,
				struct perf_evsel *evsel,
				struct bt_ctf_event_class *class)
{
	struct format_field *common_fields = evsel->tp_format->format.common_fields;
	struct format_field *fields        = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_types(cw, common_fields, class);
	if (!ret)
		ret = add_tracepoint_fields_types(cw, fields, class);

	return ret;
}
static int add_bpf_output_types(struct ctf_writer *cw,
				struct bt_ctf_event_class *class)
{
	struct bt_ctf_field_type *len_type = cw->data.u32;
	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
	struct bt_ctf_field_type *seq_type;
	int ret;

	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
	if (ret)
		return ret;

	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
	if (!seq_type)
		return -1;

	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}
static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              the CTF event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
	 *                              are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2("  field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s';\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

#undef ADD_FIELD
	return 0;
}
static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
{
	struct bt_ctf_event_class *event_class;
	struct evsel_priv *priv;
	const char *name = perf_evsel__name(evsel);
	int ret;

	pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);

	event_class = bt_ctf_event_class_create(name);
	if (!event_class)
		return -1;

	ret = add_generic_types(cw, evsel, event_class);
	if (ret)
		goto err;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_types(cw, evsel, event_class);
		if (ret)
			goto err;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_types(cw, event_class);
		if (ret)
			goto err;
	}

	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
	if (ret) {
		pr("Failed to add event class into stream.\n");
		goto err;
	}

	priv = malloc(sizeof(*priv));
	if (!priv)
		goto err;

	priv->event_class = event_class;
	evsel->priv       = priv;
	return 0;

err:
	bt_ctf_event_class_put(event_class);
	pr_err("Failed to add event '%s'.\n", name);
	return -1;
}
static int setup_events(struct ctf_writer *cw, struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = add_event(cw, evsel);
		if (ret)
			return ret;
	}
	return 0;
}
#define __NON_SAMPLE_ADD_FIELD(t, n)					\
	do {								\
		pr2("  field '%s'\n", #n);				\
		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
			pr_err("Failed to add field '%s';\n", #n);	\
			return -1;					\
		}							\
	} while (0)

#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body)			\
static int add_##_name##_event(struct ctf_writer *cw)			\
{									\
	struct bt_ctf_event_class *event_class;				\
	int ret;							\
									\
	pr("Adding "#_name" event\n");					\
	event_class = bt_ctf_event_class_create("perf_" #_name);	\
	if (!event_class)						\
		return -1;						\
	body								\
									\
	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
	if (ret) {							\
		pr("Failed to add event class '"#_name"' into stream.\n");\
		return ret;						\
	}								\
									\
	cw->_name##_class = event_class;				\
	bt_ctf_event_class_put(event_class);				\
	return 0;							\
}

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(string, comm);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
static int setup_non_sample_events(struct ctf_writer *cw,
				   struct perf_session *session __maybe_unused)
{
	int ret;

	ret = add_comm_event(cw);
	if (ret)
		return ret;
	ret = add_exit_event(cw);
	if (ret)
		return ret;
	ret = add_fork_event(cw);
	if (ret)
		return ret;
	return 0;
}
static void cleanup_events(struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel_priv *priv;

		priv = evsel->priv;
		bt_ctf_event_class_put(priv->event_class);
		zfree(&evsel->priv);
	}

	perf_evlist__delete(evlist);
	session->evlist = NULL;
}
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
	struct ctf_stream **stream;
	struct perf_header *ph = &session->header;
	int ncpus;

	/*
	 * Try to get the number of cpus used in the data file,
	 * if not present fallback to the MAX_CPUS.
	 */
	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;

	stream = zalloc(sizeof(*stream) * ncpus);
	if (!stream) {
		pr_err("Failed to allocate streams.\n");
		return -ENOMEM;
	}

	cw->stream     = stream;
	cw->stream_cnt = ncpus;
	return 0;
}
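/*
 * Only the per-cpu stream pointer array is allocated here; the CTF
 * streams themselves are created lazily by ctf_stream() the first time
 * an event is seen for a given cpu.
 */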
static void free_streams(struct ctf_writer *cw)
{
	int cpu;

	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
		ctf_stream__delete(cw->stream[cpu]);

	free(cw->stream);
}
static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)							\
do {									\
	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
		return -1;						\
} while (0)

	ADD("host",    header->env.hostname);
	ADD("sysname", "Linux");
	ADD("release", header->env.os_release);
	ADD("version", header->env.version);
	ADD("machine", header->env.arch);
	ADD("domain", "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}
static int ctf_writer__setup_clock(struct ctf_writer *cw)
{
	struct bt_ctf_clock *clock = cw->clock;

	bt_ctf_clock_set_description(clock, "perf clock");

#define SET(__n, __v)				\
do {						\
	if (bt_ctf_clock_set_##__n(clock, __v))	\
		return -1;			\
} while (0)

	SET(frequency,   1000000000);
	SET(offset_s,    0);
	SET(offset,      0);
	SET(precision,   10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}
static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
	struct bt_ctf_field_type *type;

	type = bt_ctf_field_type_integer_create(size);
	if (!type)
		return NULL;

	if (sign &&
	    bt_ctf_field_type_integer_set_signed(type, 1))
		goto err;

	if (hex &&
	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
		goto err;

#if __BYTE_ORDER == __BIG_ENDIAN
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif

	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "" : "un", hex ? "hex" : "");
	return type;

err:
	bt_ctf_field_type_put(type);
	return NULL;
}
static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
		bt_ctf_field_type_put(cw->data.array[i]);
}
static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;

#undef CREATE_INT_TYPE
}
static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}
static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
	struct bt_ctf_writer		*writer;
	struct bt_ctf_stream_class	*stream_class;
	struct bt_ctf_clock		*clock;
	struct bt_ctf_field_type	*pkt_ctx_type;
	int ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}
static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
		ret = ctf_stream__flush(cw->stream[cpu]);

	return ret;
}
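/*
 * The ordered-events queue size can be bounded from the user side, for
 * example with:
 *
 *   perf config convert.queue-size=<size>
 *
 * The value parsed below is applied via ordered_events__set_alloc_size()
 * in bt_convert__perf2ctf().
 */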
static int convert__config(const char *var, const char *value, void *cb)
{
	struct convert *c = cb;

	if (!strcmp(var, "convert.queue-size")) {
		c->queue_size = perf_config_u64(var, value);
		return 0;
	}

	return 0;
}
int bt_convert__perf2ctf(const char *input, const char *path,
			 struct perf_data_convert_opts *opts)
{
	struct perf_session *session;
	struct perf_data_file file = {
		.path  = input,
		.mode  = PERF_DATA_MODE_READ,
		.force = opts->force,
	};
	struct convert c = {
		.tool = {
			.sample          = process_sample_event,
			.mmap            = perf_event__process_mmap,
			.mmap2           = perf_event__process_mmap2,
			.comm            = perf_event__process_comm,
			.exit            = perf_event__process_exit,
			.fork            = perf_event__process_fork,
			.lost            = perf_event__process_lost,
			.tracing_data    = perf_event__process_tracing_data,
			.build_id        = perf_event__process_build_id,
			.ordered_events  = true,
			.ordering_requires_timestamps = true,
		},
	};
	struct ctf_writer *cw = &c.writer;
	int err;

	if (opts->all) {
		c.tool.comm = process_comm_event;
		c.tool.exit = process_exit_event;
		c.tool.fork = process_fork_event;
	}

	perf_config(convert__config, &c);

	/* CTF writer */
	if (ctf_writer__init(cw, path))
		return -1;

	err = -1;
	/* perf.data session */
	session = perf_session__new(&file, 0, &c.tool);
	if (!session)
		goto free_writer;

	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup */
	if (ctf_writer__setup_env(cw, session))
		goto free_session;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_session;

	if (opts->all && setup_non_sample_events(cw, session))
		goto free_session;

	if (setup_streams(cw, session))
		goto free_session;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,
		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		file.path, path);

	fprintf(stderr,
		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	if (!c.non_sample_count)
		fprintf(stderr, ") ]\n");
	else
		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_session:
	perf_session__delete(session);
free_writer:
	ctf_writer__cleanup(cw);
	pr_err("Error during conversion setup.\n");
	return err;
}