#include <stdbool.h>
#include <inttypes.h>
#include <linux/types.h>

#include "util.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"
#include "tests.h"

#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)
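
/*
 * For illustration: COMP(period) expands to (roughly)
 *
 *	if (s1->period != s2->period) {
 *		pr_debug("Samples differ at 'period'\n");
 *		return false;
 *	}
 *
 * so a mismatch in any checked member logs the member name and makes the
 * comparison function below return false.  MCOMP() does the same, but uses
 * memcmp() for members that are structs or array elements.
 */
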
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	return true;
}

static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct perf_evsel evsel = {
		.attr = {
			.sample_type = sample_type,
			.read_format = read_format,
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, 211, 212, 213},
	};
	u64 regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	struct perf_sample sample = {
		.raw_size	= sizeof(raw_data),
		.raw_data	= (void *)raw_data,
		.callchain	= &callchain.callchain,
		.branch_stack	= &branch_stack.branch_stack,
		.user_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.user_stack	= {
			.size	= sizeof(data),
			.data	= (void *)data,
		},
		.read		= {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
	};
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.attr.sample_regs_intr = sample_regs;

	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample, false);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __perf_evsel__sample_size(sample_type);

	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample.  The
 * test checks sample format bits separately and together.  If the test passes
 * %0 is returned, otherwise %-1 is returned.
 */
int test__sample_parsing(void)
{
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added.  Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_REGS_INTR << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}