// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "map_symbol.h"
#include "branch.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"
#include "util/synthetic-events.h"

#include "tests.h"

#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

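/*
 * Compare two parsed samples field by field.  Only the fields selected by
 * @type (PERF_SAMPLE_* bits) and, for PERF_SAMPLE_READ, by @read_format are
 * compared: COMP() checks scalar members, MCOMP() memcmp()s aggregates.
 */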
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

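	/*
	 * hw_idx is part of the branch stack only when
	 * PERF_SAMPLE_BRANCH_HW_INDEX is set in branch_sample_type; do_test()
	 * below always sets it when testing PERF_SAMPLE_BRANCH_STACK, so it
	 * is unconditionally compared here.
	 */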
	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		COMP(branch_stack->hw_idx);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	if (type & PERF_SAMPLE_CGROUP)
		COMP(cgroup);

	if (type & PERF_SAMPLE_AUX) {
		COMP(aux_sample.size);
		if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
			   s1->aux_sample.size)) {
			pr_debug("Samples differ at 'aux_sample'\n");
			return false;
		}
	}

	return true;
}

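/*
 * Round-trip test for one configuration: build a perf_sample with known
 * contents, synthesize a PERF_RECORD_SAMPLE from it, parse the record back
 * with evsel__parse_sample() and verify the result with samples_same().
 */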
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct evsel evsel = {
		.core = { .attr = { .sample_type = sample_type,
				    .read_format = read_format, }, },
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* nr = 3, followed by 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* nr = 1, hw_idx, then one branch entry */
		.data = {1, -1ULL, 211, 212, 213},
	};
	u64 regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
	struct perf_sample sample = {
		.raw_size	= sizeof(raw_data),
		.raw_data	= (void *)raw_data,
		.callchain	= &callchain.callchain,
		.branch_stack	= &branch_stack.branch_stack,
		.user_regs	= { .abi = PERF_SAMPLE_REGS_ABI_64,
				    .mask = sample_regs, .regs = regs, },
		.user_stack	= { .size = sizeof(data),
				    .data = (void *)data, },
		.read		= { .time_enabled = 0x030a59d664fca7deULL,
				    .time_running = 0x011b6ae553eb98edULL, },
		.intr_regs	= { .abi = PERF_SAMPLE_REGS_ABI_64,
				    .mask = sample_regs, .regs = regs, },
		.aux_sample	= { .size = sizeof(aux_data),
				    .data = (void *)aux_data, },
	};
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

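	/*
	 * The sample layout is derived from the event attributes, so the attr
	 * must advertise the same register masks and the branch HW index flag
	 * that the hand-built sample above relies on.
	 */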
	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.core.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.core.attr.sample_regs_intr = sample_regs;

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		evsel.core.attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;

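	/*
	 * Fill the register values with a pattern (i & 0xfe) that never
	 * produces an 0xff byte, so the overrun check further down, which
	 * looks for untouched 0xff filler, is not fooled by register data.
	 */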
	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

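	/*
	 * PERF_FORMAT_GROUP switches the read area between an array of
	 * {value, id} pairs and a single value/id pair; samples_same()
	 * compares whichever layout is in use.
	 */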
	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

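	/*
	 * Pre-fill the whole buffer with 0xff so that any bytes beyond the
	 * synthesized record keep that value and the record's actual size can
	 * be measured afterwards.
	 */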
	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

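	/*
	 * evsel__parse_sample() consults evsel.sample_size, presumably for its
	 * initial size sanity check, so derive it from sample_type just as a
	 * real evsel would.
	 */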
	evsel.sample_size = __evsel__sample_size(sample_type);

	err = evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample.  The
 * test checks sample format bits separately and together.  If the test passes
 * %0 is returned, otherwise %-1 is returned.
 */
int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
{
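	/*
	 * read_format values to exercise below: PERF_FORMAT_ID (4) with and
	 * without PERF_FORMAT_GROUP (8), each combined with the
	 * TOTAL_TIME_ENABLED (1) and TOTAL_TIME_RUNNING (2) bits, hence the
	 * ranges 4-7 and 12-15 in rf[].
	 */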
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added.  Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_CGROUP << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}