/*
 * CTF writing support via babeltrace.
 *
 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/compiler.h>
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-writer/clock.h>
#include <babeltrace/ctf-writer/stream.h>
#include <babeltrace/ctf-writer/event.h>
#include <babeltrace/ctf-writer/event-types.h>
#include <babeltrace/ctf-writer/event-fields.h>
#include <babeltrace/ctf-ir/utils.h>
#include <babeltrace/ctf/events.h>
#include <traceevent/event-parse.h>
#include "asm/bug.h"
#include "data-convert-bt.h"
#include "session.h"
#include "util.h"
#include "debug.h"
#include "tool.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#define pr_N(n, fmt, ...) \
        eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
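/*
 * Conversion state: each perf evsel gets a CTF event class (evsel_priv),
 * each CPU gets its own CTF stream (ctf_stream), and ctf_writer bundles
 * the babeltrace writer objects together with the shared integer/string
 * field types that are reused by every event class.
 */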
struct evsel_priv {
        struct bt_ctf_event_class *event_class;
};

#define MAX_CPUS        4096

struct ctf_stream {
        struct bt_ctf_stream    *stream;
        int                      cpu;
        u32                      count;
};

struct ctf_writer {
        /* writer primitives */
        struct bt_ctf_writer             *writer;
        struct ctf_stream               **stream;
        int                               stream_cnt;
        struct bt_ctf_stream_class       *stream_class;
        struct bt_ctf_clock              *clock;

        /* data types */
        union {
                struct {
                        struct bt_ctf_field_type        *s64;
                        struct bt_ctf_field_type        *u64;
                        struct bt_ctf_field_type        *s32;
                        struct bt_ctf_field_type        *u32;
                        struct bt_ctf_field_type        *string;
                        struct bt_ctf_field_type        *u32_hex;
                        struct bt_ctf_field_type        *u64_hex;
                };
                struct bt_ctf_field_type *array[6];
        } data;
};

struct convert {
        struct perf_tool        tool;
        struct ctf_writer       writer;

        u64                     events_size;
        u64                     events_count;

        /* Ordered events configured queue size. */
        u64                     queue_size;
};
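/*
 * Store a single integer payload field on a CTF event: create a field of
 * the given type, set its value as signed or unsigned depending on the
 * type, and attach it to the event under 'name'.
 */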
static int value_set(struct bt_ctf_field_type *type,
                     struct bt_ctf_event *event,
                     const char *name, u64 val)
{
        struct bt_ctf_field *field;
        bool sign = bt_ctf_field_type_integer_get_signed(type);
        int ret;

        field = bt_ctf_field_create(type);
        if (!field) {
                pr_err("failed to create a field %s\n", name);
                return -1;
        }

        if (sign) {
                ret = bt_ctf_field_signed_integer_set_value(field, val);
                if (ret) {
                        pr_err("failed to set field value %s\n", name);
                        goto err;
                }
        } else {
                ret = bt_ctf_field_unsigned_integer_set_value(field, val);
                if (ret) {
                        pr_err("failed to set field value %s\n", name);
                        goto err;
                }
        }

        ret = bt_ctf_event_set_payload(event, name, field);
        if (ret) {
                pr_err("failed to set payload %s\n", name);
                goto err;
        }

        pr2("  SET [%s = %" PRIu64 "]\n", name, val);

err:
        bt_ctf_field_put(field);
        return ret;
}
#define __FUNC_VALUE_SET(_name, _val_type)                              \
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,     \
                             struct bt_ctf_event *event,               \
                             const char *name,                         \
                             _val_type val)                            \
{                                                                      \
        struct bt_ctf_field_type *type = cw->data._name;               \
        return value_set(type, event, name, (u64) val);                \
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)
static struct bt_ctf_field_type *
get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
{
        unsigned long flags = field->flags;

        if (flags & FIELD_IS_STRING)
                return cw->data.string;

        if (!(flags & FIELD_IS_SIGNED)) {
                /* unsigned long values are mostly pointers */
                if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
                        return cw->data.u64_hex;
        }

        if (flags & FIELD_IS_SIGNED) {
                if (field->size == 8)
                        return cw->data.s64;
                else
                        return cw->data.s32;
        }

        if (field->size == 8)
                return cw->data.u64;
        else
                return cw->data.u32;
}
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
        unsigned long long value_mask;

        /*
         * value_mask = (1 << (size * 8 - 1)) - 1.
         * Directly set value_mask for code readers.
         */
        switch (size) {
        case 1:
                value_mask = 0x7fULL;
                break;
        case 2:
                value_mask = 0x7fffULL;
                break;
        case 4:
                value_mask = 0x7fffffffULL;
                break;
        case 8:
        default:
                /*
                 * For a 64-bit value, return it as is.  There is no need
                 * to fill its upper part.
                 */
                return value_int;
        }

        /* If it is a positive value, don't adjust. */
        if ((value_int & (~0ULL - value_mask)) == 0)
                return value_int;

        /* Fill the upper part of value_int with 1s to make it a negative long long. */
        return (value_int & value_mask) | ~value_mask;
}
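/*
 * Convert one tracepoint field of a sample into a CTF payload field,
 * handling strings, (dynamic) arrays and signed/unsigned integers.  For
 * dynamic fields the offset/length pair is first unpacked from the raw
 * sample data.
 */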
static int add_tracepoint_field_value(struct ctf_writer *cw,
                                      struct bt_ctf_event_class *event_class,
                                      struct bt_ctf_event *event,
                                      struct perf_sample *sample,
                                      struct format_field *fmtf)
{
        struct bt_ctf_field_type *type;
        struct bt_ctf_field *array_field;
        struct bt_ctf_field *field;
        const char *name = fmtf->name;
        void *data = sample->raw_data;
        unsigned long flags = fmtf->flags;
        unsigned int n_items;
        unsigned int i;
        unsigned int offset;
        unsigned int len;
        int ret;

        name = fmtf->alias;
        offset = fmtf->offset;
        len = fmtf->size;
        if (flags & FIELD_IS_STRING)
                flags &= ~FIELD_IS_ARRAY;

        if (flags & FIELD_IS_DYNAMIC) {
                unsigned long long tmp_val;

                tmp_val = pevent_read_number(fmtf->event->pevent,
                                             data + offset, len);
                offset = tmp_val;
                len = offset >> 16;
                offset &= 0xffff;
        }

        if (flags & FIELD_IS_ARRAY) {

                type = bt_ctf_event_class_get_field_by_name(
                                event_class, name);
                array_field = bt_ctf_field_create(type);
                bt_ctf_field_type_put(type);
                if (!array_field) {
                        pr_err("Failed to create array type %s\n", name);
                        return -1;
                }

                len = fmtf->size / fmtf->arraylen;
                n_items = fmtf->arraylen;
        } else {
                n_items = 1;
                array_field = NULL;
        }

        type = get_tracepoint_field_type(cw, fmtf);

        for (i = 0; i < n_items; i++) {
                if (flags & FIELD_IS_ARRAY)
                        field = bt_ctf_field_array_get_field(array_field, i);
                else
                        field = bt_ctf_field_create(type);

                if (!field) {
                        pr_err("failed to create a field %s\n", name);
                        return -1;
                }

                if (flags & FIELD_IS_STRING)
                        ret = bt_ctf_field_string_set_value(field,
                                        data + offset + i * len);
                else {
                        unsigned long long value_int;

                        value_int = pevent_read_number(
                                        fmtf->event->pevent,
                                        data + offset + i * len, len);

                        if (!(flags & FIELD_IS_SIGNED))
                                ret = bt_ctf_field_unsigned_integer_set_value(
                                                field, value_int);
                        else
                                ret = bt_ctf_field_signed_integer_set_value(
                                                field, adjust_signedness(value_int, len));
                }

                if (ret) {
                        pr_err("failed to set field value %s\n", name);
                        goto err_put_field;
                }
                if (!(flags & FIELD_IS_ARRAY)) {
                        ret = bt_ctf_event_set_payload(event, name, field);
                        if (ret) {
                                pr_err("failed to set payload %s\n", name);
                                goto err_put_field;
                        }
                }
                bt_ctf_field_put(field);
        }
        if (flags & FIELD_IS_ARRAY) {
                ret = bt_ctf_event_set_payload(event, name, array_field);
                if (ret) {
                        pr_err("Failed to add payload array %s\n", name);
                        return -1;
                }
                bt_ctf_field_put(array_field);
        }
        return 0;

err_put_field:
        bt_ctf_field_put(field);
        return -1;
}
static int add_tracepoint_fields_values(struct ctf_writer *cw,
                                        struct bt_ctf_event_class *event_class,
                                        struct bt_ctf_event *event,
                                        struct format_field *fields,
                                        struct perf_sample *sample)
{
        struct format_field *field;
        int ret;

        for (field = fields; field; field = field->next) {
                ret = add_tracepoint_field_value(cw, event_class, event, sample,
                                                 field);
                if (ret)
                        return -1;
        }
        return 0;
}
static int add_tracepoint_values(struct ctf_writer *cw,
                                 struct bt_ctf_event_class *event_class,
                                 struct bt_ctf_event *event,
                                 struct perf_evsel *evsel,
                                 struct perf_sample *sample)
{
        struct format_field *common_fields = evsel->tp_format->format.common_fields;
        struct format_field *fields        = evsel->tp_format->format.fields;
        int ret;

        ret = add_tracepoint_fields_values(cw, event_class, event,
                                           common_fields, sample);
        if (!ret)
                ret = add_tracepoint_fields_values(cw, event_class, event,
                                                   fields, sample);

        return ret;
}
static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
                      struct bt_ctf_event *event,
                      struct perf_sample *sample)
{
        struct bt_ctf_field_type *len_type, *seq_type;
        struct bt_ctf_field *len_field, *seq_field;
        unsigned int raw_size = sample->raw_size;
        unsigned int nr_elements = raw_size / sizeof(u32);
        unsigned int i;
        int ret;

        if (nr_elements * sizeof(u32) != raw_size)
                pr_warning("Incorrect raw_size (%u) in bpf output event, skip %lu bytes\n",
                           raw_size, nr_elements * sizeof(u32) - raw_size);

        len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
        len_field = bt_ctf_field_create(len_type);
        if (!len_field) {
                pr_err("failed to create 'raw_len' for bpf output event\n");
                ret = -1;
                goto put_len_type;
        }

        ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
        if (ret) {
                pr_err("failed to set field value for raw_len\n");
                goto put_len_field;
        }
        ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
        if (ret) {
                pr_err("failed to set payload to raw_len\n");
                goto put_len_field;
        }

        seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
        seq_field = bt_ctf_field_create(seq_type);
        if (!seq_field) {
                pr_err("failed to create 'raw_data' for bpf output event\n");
                ret = -1;
                goto put_seq_type;
        }

        ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
        if (ret) {
                pr_err("failed to set length of 'raw_data'\n");
                goto put_seq_field;
        }

        for (i = 0; i < nr_elements; i++) {
                struct bt_ctf_field *elem_field =
                        bt_ctf_field_sequence_get_field(seq_field, i);

                ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
                                ((u32 *)(sample->raw_data))[i]);

                bt_ctf_field_put(elem_field);
                if (ret) {
                        pr_err("failed to set raw_data[%d]\n", i);
                        goto put_seq_field;
                }
        }

        ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
        if (ret)
                pr_err("failed to set payload for raw_data\n");

put_seq_field:
        bt_ctf_field_put(seq_field);
put_seq_type:
        bt_ctf_field_type_put(seq_type);
put_len_field:
        bt_ctf_field_put(len_field);
put_len_type:
        bt_ctf_field_type_put(len_type);
        return ret;
}
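/*
 * Fill in the generic perf sample fields (ip, tid/pid, id, stream id,
 * period, weight, data_src, transaction) that add_generic_types()
 * declared for this event class.
 */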
static int add_generic_values(struct ctf_writer *cw,
                              struct bt_ctf_event *event,
                              struct perf_evsel *evsel,
                              struct perf_sample *sample)
{
        u64 type = evsel->attr.sample_type;
        int ret;

        /*
         * missing:
         *   PERF_SAMPLE_TIME         - not needed as we have it in
         *                              the CTF event header
         *   PERF_SAMPLE_READ         - TODO
         *   PERF_SAMPLE_CALLCHAIN    - TODO
         *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
         *   PERF_SAMPLE_BRANCH_STACK - TODO
         *   PERF_SAMPLE_REGS_USER    - TODO
         *   PERF_SAMPLE_STACK_USER   - TODO
         */

        if (type & PERF_SAMPLE_IP) {
                ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_TID) {
                ret = value_set_s32(cw, event, "perf_tid", sample->tid);
                if (ret)
                        return -1;

                ret = value_set_s32(cw, event, "perf_pid", sample->pid);
                if (ret)
                        return -1;
        }

        if ((type & PERF_SAMPLE_ID) ||
            (type & PERF_SAMPLE_IDENTIFIER)) {
                ret = value_set_u64(cw, event, "perf_id", sample->id);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                ret = value_set_u64(cw, event, "perf_period", sample->period);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_WEIGHT) {
                ret = value_set_u64(cw, event, "perf_weight", sample->weight);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_DATA_SRC) {
                ret = value_set_u64(cw, event, "perf_data_src",
                                    sample->data_src);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_TRANSACTION) {
                ret = value_set_u64(cw, event, "perf_transaction",
                                    sample->transaction);
                if (ret)
                        return -1;
        }

        return 0;
}
static int ctf_stream__flush(struct ctf_stream *cs)
{
        int err = 0;

        if (cs) {
                err = bt_ctf_stream_flush(cs->stream);
                if (err)
                        pr_err("CTF stream %d flush failed\n", cs->cpu);

                pr("Flush stream for cpu %d (%u samples)\n",
                   cs->cpu, cs->count);

                cs->count = 0;
        }

        return err;
}
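/*
 * Streams are created lazily, one per CPU, and the CPU number is stored
 * in the packet context as 'cpu_id', so the resulting CTF trace keeps
 * the per-CPU separation of the original perf.data.
 */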
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
        struct ctf_stream *cs;
        struct bt_ctf_field *pkt_ctx   = NULL;
        struct bt_ctf_field *cpu_field = NULL;
        struct bt_ctf_stream *stream   = NULL;
        int ret;

        cs = zalloc(sizeof(*cs));
        if (!cs) {
                pr_err("Failed to allocate ctf stream\n");
                return NULL;
        }

        stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
        if (!stream) {
                pr_err("Failed to create CTF stream\n");
                goto out;
        }

        pkt_ctx = bt_ctf_stream_get_packet_context(stream);
        if (!pkt_ctx) {
                pr_err("Failed to obtain packet context\n");
                goto out;
        }

        cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
        bt_ctf_field_put(pkt_ctx);
        if (!cpu_field) {
                pr_err("Failed to obtain cpu field\n");
                goto out;
        }

        ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
        if (ret) {
                pr_err("Failed to update CPU number\n");
                goto out;
        }

        bt_ctf_field_put(cpu_field);

        cs->cpu    = cpu;
        cs->stream = stream;
        return cs;

out:
        if (cpu_field)
                bt_ctf_field_put(cpu_field);
        if (stream)
                bt_ctf_stream_put(stream);

        free(cs);
        return NULL;
}

static void ctf_stream__delete(struct ctf_stream *cs)
{
        if (cs) {
                bt_ctf_stream_put(cs->stream);
                free(cs);
        }
}
static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
        struct ctf_stream *cs = cw->stream[cpu];

        if (!cs) {
                cs = ctf_stream__create(cw, cpu);
                cw->stream[cpu] = cs;
        }

        return cs;
}
static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
                          struct perf_evsel *evsel)
{
        int cpu = 0;

        if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
                cpu = sample->cpu;

        if (cpu > cw->stream_cnt) {
                pr_err("Event was recorded for CPU %d, limit is at %d.\n",
                       cpu, cw->stream_cnt);
                cpu = 0;
        }

        return cpu;
}
#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine the
 * time for the stream flush other than keeping track
 * of the number of events and checking it against
 * a defined limit.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
        return cs->count >= STREAM_FLUSH_COUNT;
}
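/*
 * Per-sample callback: build a CTF event from the perf sample, fill in
 * the generic, tracepoint and BPF output payloads, and append it to the
 * stream of the CPU the sample was recorded on, flushing that stream
 * every STREAM_FLUSH_COUNT events.
 */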
static int process_sample_event(struct perf_tool *tool,
                                union perf_event *_event,
                                struct perf_sample *sample,
                                struct perf_evsel *evsel,
                                struct machine *machine __maybe_unused)
{
        struct convert *c = container_of(tool, struct convert, tool);
        struct evsel_priv *priv = evsel->priv;
        struct ctf_writer *cw = &c->writer;
        struct ctf_stream *cs;
        struct bt_ctf_event_class *event_class;
        struct bt_ctf_event *event;
        int ret;

        if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
                return 0;

        event_class = priv->event_class;

        /* update stats */
        c->events_count++;
        c->events_size += _event->header.size;

        pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);

        event = bt_ctf_event_create(event_class);
        if (!event) {
                pr_err("Failed to create a CTF event\n");
                return -1;
        }

        bt_ctf_clock_set_time(cw->clock, sample->time);

        ret = add_generic_values(cw, event, evsel, sample);
        if (ret)
                return -1;

        if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
                ret = add_tracepoint_values(cw, event_class, event,
                                            evsel, sample);
                if (ret)
                        return -1;
        }

        if (perf_evsel__is_bpf_output(evsel)) {
                ret = add_bpf_output_values(event_class, event, sample);
                if (ret)
                        return -1;
        }

        cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
        if (cs) {
                if (is_flush_needed(cs))
                        ctf_stream__flush(cs);

                cs->count++;
                bt_ctf_stream_append_event(cs->stream, event);
        }

        bt_ctf_event_put(event);
        return cs ? 0 : -1;
}
/* If dup < 0, add a prefix. Else, add a _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
        char *new_name = NULL;
        size_t len;

        if (!name)
                name = orig_name;

        if (dup >= 10)
                goto out;
        /*
         * Add an '_' prefix to a potential keyword.  According to
         * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
         * further CTF spec updating may require us to use '$'.
         */
        if (dup < 0)
                len = strlen(name) + sizeof("_");
        else
                len = strlen(orig_name) + sizeof("_dupl_X");

        new_name = malloc(len);
        if (!new_name)
                goto out;

        if (dup < 0)
                snprintf(new_name, len, "_%s", name);
        else
                snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
        if (name != orig_name)
                free(name);

        return new_name;
}
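/*
 * event_class_add_field() below uses change_name() in two ways: a field
 * name rejected by bt_ctf_validate_identifier() (e.g. one that clashes
 * with a CTF keyword) gets an '_' prefix via change_name(name, name, -1),
 * and a name that is already taken in the event class gets an
 * "<orig>_dupl_<n>" replacement instead.
 */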
static int event_class_add_field(struct bt_ctf_event_class *event_class,
                                 struct bt_ctf_field_type *type,
                                 struct format_field *field)
{
        struct bt_ctf_field_type *t = NULL;
        char *name;
        int dup = 1;
        int ret;

        /* alias was already assigned */
        if (field->alias != field->name)
                return bt_ctf_event_class_add_field(event_class, type,
                                                    (char *)field->alias);

        name = field->name;

        /* If 'name' is a keyword, add a prefix. */
        if (bt_ctf_validate_identifier(name))
                name = change_name(name, field->name, -1);

        if (!name) {
                pr_err("Failed to fix invalid identifier.");
                return -1;
        }
        while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
                bt_ctf_field_type_put(t);
                name = change_name(name, field->name, dup++);
                if (!name) {
                        pr_err("Failed to create dup name for '%s'\n", field->name);
                        return -1;
                }
        }

        ret = bt_ctf_event_class_add_field(event_class, type, name);

        if (name != field->name)
                free(name);

        return ret ? -1 : 0;
}
static int add_tracepoint_fields_types(struct ctf_writer *cw,
                                       struct format_field *fields,
                                       struct bt_ctf_event_class *event_class)
{
        struct format_field *field;
        int ret;

        for (field = fields; field; field = field->next) {
                struct bt_ctf_field_type *type;
                unsigned long flags = field->flags;

                pr2("  field '%s'\n", field->name);

                type = get_tracepoint_field_type(cw, field);

                /*
                 * A string is an array of chars. For this we use the string
                 * type and don't care that it is an array. What we don't
                 * support is an array of strings.
                 */
                if (flags & FIELD_IS_STRING)
                        flags &= ~FIELD_IS_ARRAY;

                if (flags & FIELD_IS_ARRAY)
                        type = bt_ctf_field_type_array_create(type, field->arraylen);

                ret = event_class_add_field(event_class, type, field);

                if (flags & FIELD_IS_ARRAY)
                        bt_ctf_field_type_put(type);

                if (ret) {
                        pr_err("Failed to add field '%s': %d\n",
                               field->name, ret);
                        return -1;
                }
        }

        return 0;
}
static int add_tracepoint_types(struct ctf_writer *cw,
                                struct perf_evsel *evsel,
                                struct bt_ctf_event_class *class)
{
        struct format_field *common_fields = evsel->tp_format->format.common_fields;
        struct format_field *fields        = evsel->tp_format->format.fields;
        int ret;

        ret = add_tracepoint_fields_types(cw, common_fields, class);
        if (!ret)
                ret = add_tracepoint_fields_types(cw, fields, class);

        return ret;
}
static int add_bpf_output_types(struct ctf_writer *cw,
                                struct bt_ctf_event_class *class)
{
        struct bt_ctf_field_type *len_type = cw->data.u32;
        struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
        struct bt_ctf_field_type *seq_type;
        int ret;

        ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
        if (ret)
                return ret;

        seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
        if (!seq_type)
                return -1;

        return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}
static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
                             struct bt_ctf_event_class *event_class)
{
        u64 type = evsel->attr.sample_type;

        /*
         * missing:
         *   PERF_SAMPLE_TIME         - not needed as we have it in
         *                              the CTF event header
         *   PERF_SAMPLE_READ         - TODO
         *   PERF_SAMPLE_CALLCHAIN    - TODO
         *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
         *                              are handled separately
         *   PERF_SAMPLE_BRANCH_STACK - TODO
         *   PERF_SAMPLE_REGS_USER    - TODO
         *   PERF_SAMPLE_STACK_USER   - TODO
         */

#define ADD_FIELD(cl, t, n)                                             \
        do {                                                            \
                pr2("  field '%s'\n", n);                               \
                if (bt_ctf_event_class_add_field(cl, t, n)) {           \
                        pr_err("Failed to add field '%s'\n", n);        \
                        return -1;                                      \
                }                                                       \
        } while (0)

        if (type & PERF_SAMPLE_IP)
                ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

        if (type & PERF_SAMPLE_TID) {
                ADD_FIELD(event_class, cw->data.s32, "perf_tid");
                ADD_FIELD(event_class, cw->data.s32, "perf_pid");
        }

        if ((type & PERF_SAMPLE_ID) ||
            (type & PERF_SAMPLE_IDENTIFIER))
                ADD_FIELD(event_class, cw->data.u64, "perf_id");

        if (type & PERF_SAMPLE_STREAM_ID)
                ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

        if (type & PERF_SAMPLE_PERIOD)
                ADD_FIELD(event_class, cw->data.u64, "perf_period");

        if (type & PERF_SAMPLE_WEIGHT)
                ADD_FIELD(event_class, cw->data.u64, "perf_weight");

        if (type & PERF_SAMPLE_DATA_SRC)
                ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

        if (type & PERF_SAMPLE_TRANSACTION)
                ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

#undef ADD_FIELD
        return 0;
}
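/*
 * Create a CTF event class for one evsel: declare its generic,
 * tracepoint and/or BPF output field types, register the class with the
 * stream class and remember it in evsel->priv for process_sample_event().
 */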
static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
{
        struct bt_ctf_event_class *event_class;
        struct evsel_priv *priv;
        const char *name = perf_evsel__name(evsel);
        int ret;

        pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);

        event_class = bt_ctf_event_class_create(name);
        if (!event_class)
                return -1;

        ret = add_generic_types(cw, evsel, event_class);
        if (ret)
                goto err;

        if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
                ret = add_tracepoint_types(cw, evsel, event_class);
                if (ret)
                        goto err;
        }

        if (perf_evsel__is_bpf_output(evsel)) {
                ret = add_bpf_output_types(cw, event_class);
                if (ret)
                        goto err;
        }

        ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
        if (ret) {
                pr("Failed to add event class into stream.\n");
                goto err;
        }

        priv = malloc(sizeof(*priv));
        if (!priv)
                goto err;

        priv->event_class = event_class;
        evsel->priv       = priv;
        return 0;

err:
        bt_ctf_event_class_put(event_class);
        pr_err("Failed to add event '%s'.\n", name);
        return -1;
}
static int setup_events(struct ctf_writer *cw, struct perf_session *session)
{
        struct perf_evlist *evlist = session->evlist;
        struct perf_evsel *evsel;
        int ret;

        evlist__for_each(evlist, evsel) {
                ret = add_event(cw, evsel);
                if (ret)
                        return ret;
        }
        return 0;
}
*session
)
973 struct perf_evlist
*evlist
= session
->evlist
;
974 struct perf_evsel
*evsel
;
976 evlist__for_each(evlist
, evsel
) {
977 struct evsel_priv
*priv
;
980 bt_ctf_event_class_put(priv
->event_class
);
984 perf_evlist__delete(evlist
);
985 session
->evlist
= NULL
;
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
        struct ctf_stream **stream;
        struct perf_header *ph = &session->header;
        int ncpus;

        /*
         * Try to get the number of CPUs used in the data file;
         * if not present, fall back to MAX_CPUS.
         */
        ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;

        stream = zalloc(sizeof(*stream) * ncpus);
        if (!stream) {
                pr_err("Failed to allocate streams.\n");
                return -ENOMEM;
        }

        cw->stream     = stream;
        cw->stream_cnt = ncpus;
        return 0;
}
static void free_streams(struct ctf_writer *cw)
{
        int cpu;

        for (cpu = 0; cpu < cw->stream_cnt; cpu++)
                ctf_stream__delete(cw->stream[cpu]);

        free(cw->stream);
}
static int ctf_writer__setup_env(struct ctf_writer *cw,
                                 struct perf_session *session)
{
        struct perf_header *header = &session->header;
        struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)                                                   \
do {                                                                    \
        if (bt_ctf_writer_add_environment_field(writer, __n, __v))     \
                return -1;                                              \
} while (0)

        ADD("host",    header->env.hostname);
        ADD("sysname", "Linux");
        ADD("release", header->env.os_release);
        ADD("version", header->env.version);
        ADD("machine", header->env.arch);
        ADD("domain", "kernel");
        ADD("tracer_name", "perf");

#undef ADD
        return 0;
}
static int ctf_writer__setup_clock(struct ctf_writer *cw)
{
        struct bt_ctf_clock *clock = cw->clock;

        bt_ctf_clock_set_description(clock, "perf clock");

#define SET(__n, __v)                           \
do {                                            \
        if (bt_ctf_clock_set_##__n(clock, __v)) \
                return -1;                      \
} while (0)

        SET(frequency,   1000000000);
        SET(is_absolute, 0);

#undef SET
        return 0;
}
static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
        struct bt_ctf_field_type *type;

        type = bt_ctf_field_type_integer_create(size);
        if (!type)
                return NULL;

        if (sign &&
            bt_ctf_field_type_integer_set_signed(type, 1))
                goto err;

        if (hex &&
            bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
                goto err;

#if __BYTE_ORDER == __BIG_ENDIAN
        bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
        bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif

        pr2("Created type: INTEGER %d-bit %ssigned %s\n",
            size, sign ? "" : "un", hex ? "hex" : "");
        return type;

err:
        bt_ctf_field_type_put(type);
        return NULL;
}
static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
                bt_ctf_field_type_put(cw->data.array[i]);
}
static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)          \
do {                                                    \
        (type) = create_int_type(size, sign, hex);      \
        if (!(type))                                    \
                goto err;                               \
} while (0)

        CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
        CREATE_INT_TYPE(cw->data.u64, 64, false, false);
        CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
        CREATE_INT_TYPE(cw->data.u32, 32, false, false);
        CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
        CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

        cw->data.string = bt_ctf_field_type_string_create();
        if (cw->data.string)
                return 0;

err:
        ctf_writer__cleanup_data(cw);
        pr_err("Failed to create data types.\n");
        return -1;
}
static void ctf_writer__cleanup(struct ctf_writer *cw)
{
        ctf_writer__cleanup_data(cw);

        bt_ctf_clock_put(cw->clock);
        free_streams(cw);
        bt_ctf_stream_class_put(cw->stream_class);
        bt_ctf_writer_put(cw->writer);

        /* and NULL all the pointers */
        memset(cw, 0, sizeof(*cw));
}
static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
        struct bt_ctf_writer       *writer;
        struct bt_ctf_stream_class *stream_class;
        struct bt_ctf_clock        *clock;
        struct bt_ctf_field_type   *pkt_ctx_type;
        int                         ret;

        /* CTF writer */
        writer = bt_ctf_writer_create(path);
        if (!writer)
                goto err;

        cw->writer = writer;

        /* CTF clock */
        clock = bt_ctf_clock_create("perf_clock");
        if (!clock) {
                pr("Failed to create CTF clock.\n");
                goto err_cleanup;
        }

        cw->clock = clock;

        if (ctf_writer__setup_clock(cw)) {
                pr("Failed to setup CTF clock.\n");
                goto err_cleanup;
        }

        /* CTF stream class */
        stream_class = bt_ctf_stream_class_create("perf_stream");
        if (!stream_class) {
                pr("Failed to create CTF stream class.\n");
                goto err_cleanup;
        }

        cw->stream_class = stream_class;

        /* CTF clock stream setup */
        if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
                pr("Failed to assign CTF clock to stream class.\n");
                goto err_cleanup;
        }

        if (ctf_writer__init_data(cw))
                goto err_cleanup;

        /* Add cpu_id for packet context */
        pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
        if (!pkt_ctx_type)
                goto err_cleanup;

        ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
        bt_ctf_field_type_put(pkt_ctx_type);
        if (ret)
                goto err_cleanup;

        /* CTF clock writer setup */
        if (bt_ctf_writer_add_clock(writer, clock)) {
                pr("Failed to assign CTF clock to writer.\n");
                goto err_cleanup;
        }

        return 0;

err_cleanup:
        ctf_writer__cleanup(cw);
err:
        pr_err("Failed to setup CTF writer.\n");
        return -1;
}
static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
        int cpu, ret = 0;

        for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
                ret = ctf_stream__flush(cw->stream[cpu]);

        return ret;
}
static int convert__config(const char *var, const char *value, void *cb)
{
        struct convert *c = cb;

        if (!strcmp(var, "convert.queue-size")) {
                c->queue_size = perf_config_u64(var, value);
                return 0;
        }

        return 0;
}
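/*
 * Entry point for 'perf data convert --to-ctf': read the perf.data file
 * 'input', set up a CTF writer at 'path', replay all events through the
 * tool callbacks above and flush the per-CPU streams.
 */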
int bt_convert__perf2ctf(const char *input, const char *path, bool force)
{
        struct perf_session *session;
        struct perf_data_file file = {
                .path  = input,
                .mode  = PERF_DATA_MODE_READ,
                .force = force,
        };
        struct convert c = {
                .tool = {
                        .sample          = process_sample_event,
                        .mmap            = perf_event__process_mmap,
                        .mmap2           = perf_event__process_mmap2,
                        .comm            = perf_event__process_comm,
                        .exit            = perf_event__process_exit,
                        .fork            = perf_event__process_fork,
                        .lost            = perf_event__process_lost,
                        .tracing_data    = perf_event__process_tracing_data,
                        .build_id        = perf_event__process_build_id,
                        .ordered_events  = true,
                        .ordering_requires_timestamps = true,
                },
        };
        struct ctf_writer *cw = &c.writer;
        int err = -1;

        perf_config(convert__config, &c);

        /* CTF writer */
        if (ctf_writer__init(cw, path))
                return -1;

        /* perf.data session */
        session = perf_session__new(&file, 0, &c.tool);
        if (!session)
                goto free_writer;

        if (c.queue_size) {
                ordered_events__set_alloc_size(&session->ordered_events,
                                               c.queue_size);
        }

        /* CTF writer env/clock setup */
        if (ctf_writer__setup_env(cw, session))
                goto free_session;

        /* CTF events setup */
        if (setup_events(cw, session))
                goto free_session;

        if (setup_streams(cw, session))
                goto free_session;

        err = perf_session__process_events(session);
        if (!err)
                err = ctf_writer__flush_streams(cw);
        else
                pr_err("Error during conversion.\n");

        fprintf(stderr,
                "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
                file.path, path);

        fprintf(stderr,
                "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
                (double) c.events_size / 1024.0 / 1024.0,
                c.events_count);

        cleanup_events(session);
        perf_session__delete(session);
        ctf_writer__cleanup(cw);

        return err;

free_session:
        perf_session__delete(session);
free_writer:
        ctf_writer__cleanup(cw);
        pr_err("Error during conversion setup.\n");
        return err;
}
1317 pr_err("Error during conversion setup.\n");