1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/types.h>
3 #include <linux/string.h>
4 #include <linux/zalloc.h>
7 #include "../../../util/event.h"
8 #include "../../../util/synthetic-events.h"
9 #include "../../../util/machine.h"
10 #include "../../../util/tool.h"
11 #include "../../../util/map.h"
12 #include "../../../util/debug.h"
13 #include "util/sample.h"
15 #if defined(__x86_64__)
17 struct perf_event__synthesize_extra_kmaps_cb_args
{
18 const struct perf_tool
*tool
;
19 perf_event__handler_t process
;
20 struct machine
*machine
;
21 union perf_event
*event
;
24 static int perf_event__synthesize_extra_kmaps_cb(struct map
*map
, void *data
)
26 struct perf_event__synthesize_extra_kmaps_cb_args
*args
= data
;
27 union perf_event
*event
= args
->event
;
31 if (!__map__is_extra_kernel_map(map
))
34 kmap
= map__kmap(map
);
36 size
= sizeof(event
->mmap
) - sizeof(event
->mmap
.filename
) +
37 PERF_ALIGN(strlen(kmap
->name
) + 1, sizeof(u64
)) +
38 args
->machine
->id_hdr_size
;
40 memset(event
, 0, size
);
42 event
->mmap
.header
.type
= PERF_RECORD_MMAP
;
45 * kernel uses 0 for user space maps, see kernel/perf_event.c
48 if (machine__is_host(args
->machine
))
49 event
->header
.misc
= PERF_RECORD_MISC_KERNEL
;
51 event
->header
.misc
= PERF_RECORD_MISC_GUEST_KERNEL
;
53 event
->mmap
.header
.size
= size
;
55 event
->mmap
.start
= map__start(map
);
56 event
->mmap
.len
= map__size(map
);
57 event
->mmap
.pgoff
= map__pgoff(map
);
58 event
->mmap
.pid
= args
->machine
->pid
;
60 strlcpy(event
->mmap
.filename
, kmap
->name
, PATH_MAX
);
62 if (perf_tool__process_synth_event(args
->tool
, event
, args
->machine
, args
->process
) != 0)
68 int perf_event__synthesize_extra_kmaps(const struct perf_tool
*tool
,
69 perf_event__handler_t process
,
70 struct machine
*machine
)
73 struct maps
*kmaps
= machine__kernel_maps(machine
);
74 struct perf_event__synthesize_extra_kmaps_cb_args args
= {
78 .event
= zalloc(sizeof(args
.event
->mmap
) + machine
->id_hdr_size
),
82 pr_debug("Not enough memory synthesizing mmap event "
83 "for extra kernel maps\n");
87 rc
= maps__for_each_map(kmaps
, perf_event__synthesize_extra_kmaps_cb
, &args
);
95 void arch_perf_parse_sample_weight(struct perf_sample
*data
,
96 const __u64
*array
, u64 type
)
98 union perf_sample_weight weight
;
100 weight
.full
= *array
;
101 if (type
& PERF_SAMPLE_WEIGHT
)
102 data
->weight
= weight
.full
;
104 data
->weight
= weight
.var1_dw
;
105 data
->ins_lat
= weight
.var2_w
;
106 data
->retire_lat
= weight
.var3_w
;
110 void arch_perf_synthesize_sample_weight(const struct perf_sample
*data
,
111 __u64
*array
, u64 type
)
113 *array
= data
->weight
;
115 if (type
& PERF_SAMPLE_WEIGHT_STRUCT
) {
116 *array
&= 0xffffffff;
117 *array
|= ((u64
)data
->ins_lat
<< 32);
118 *array
|= ((u64
)data
->retire_lat
<< 48);
/*
 * Translate generic sort-column headers to their x86-specific names.
 *
 * Returns the x86 name for the pipeline-stage-cycle headers, or
 * @se_header unchanged for anything unrecognized.  Returning the input
 * unchanged (rather than falling off the end) keeps callers safe.
 */
const char *arch_perf_header_entry(const char *se_header)
{
	if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
		return "Local Retire Latency";
	else if (!strcmp(se_header, "Pipeline Stage Cycle"))
		return "Retire Latency";

	return se_header;
}
/*
 * Report whether this architecture supports @sort_key.
 *
 * x86 supports the (local) pipeline-stage-cycle keys, which map onto
 * the retire-latency fields of PERF_SAMPLE_WEIGHT_STRUCT.
 * Returns 1 if supported, 0 otherwise.
 */
int arch_support_sort_key(const char *sort_key)
{
	if (!strcmp(sort_key, "p_stage_cyc"))
		return 1;
	if (!strcmp(sort_key, "local_p_stage_cyc"))
		return 1;
	return 0;
}