tools/perf/tests/sample-parsing.c
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "map_symbol.h"
#include "branch.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"
#include "util/synthetic-events.h"

#include "tests.h"
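
/*
 * Helpers used by samples_same() below: COMP() compares a scalar member of
 * two struct perf_sample instances, MCOMP() memcmp()s an embedded struct or
 * array element.  Both log the differing member name and bail out.
 */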
#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)
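
/*
 * Field-by-field comparison of two perf_sample structs, limited to the
 * fields selected by @type (PERF_SAMPLE_* bits) and @read_format
 * (PERF_FORMAT_* bits).  Returns true if every selected field matches.
 */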
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		COMP(branch_stack->hw_idx);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	if (type & PERF_SAMPLE_CGROUP)
		COMP(cgroup);

	if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
		COMP(data_page_size);

	if (type & PERF_SAMPLE_AUX) {
		COMP(aux_sample.size);
		if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
			   s1->aux_sample.size)) {
			pr_debug("Samples differ at 'aux_sample'\n");
			return false;
		}
	}

	return true;
}
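
/*
 * Round-trip test for one combination of sample_type, sample_regs and
 * read_format: build a perf_sample filled with known values, synthesize a
 * raw PERF_RECORD_SAMPLE with perf_event__synthesize_sample(), parse it back
 * with evsel__parse_sample() and verify the result with samples_same().
 */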
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct evsel evsel = {
		.needs_swap = false,
		.core = {
			.attr = {
				.sample_type = sample_type,
				.read_format = read_format,
			},
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, -1ULL, 211, 212, 213},
	};
	u64 regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
	struct perf_sample sample = {
		.ip = 101,
		.pid = 102,
		.tid = 103,
		.time = 104,
		.addr = 105,
		.id = 106,
		.stream_id = 107,
		.period = 108,
		.weight = 109,
		.cpu = 110,
		.raw_size = sizeof(raw_data),
		.data_src = 111,
		.transaction = 112,
		.raw_data = (void *)raw_data,
		.callchain = &callchain.callchain,
		.no_hw_idx = false,
		.branch_stack = &branch_stack.branch_stack,
		.user_regs = {
			.abi = PERF_SAMPLE_REGS_ABI_64,
			.mask = sample_regs,
			.regs = regs,
		},
		.user_stack = {
			.size = sizeof(data),
			.data = (void *)data,
		},
		.read = {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs = {
			.abi = PERF_SAMPLE_REGS_ABI_64,
			.mask = sample_regs,
			.regs = regs,
		},
		.phys_addr = 113,
		.cgroup = 114,
		.data_page_size = 115,
		.aux_sample = {
			.size = sizeof(aux_data),
			.data = (void *)aux_data,
		},
	};
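	/* per-event read values used when read_format has PERF_FORMAT_GROUP */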
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.core.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.core.attr.sample_regs_intr = sample_regs;

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		evsel.core.attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
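
	/*
	 * Fill the registers with a recognizable pattern; i & 0xfe keeps every
	 * byte below 0xff, so the synthesized payload cannot collide with the
	 * 0xff poison used for the size check further down.
	 */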
	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}
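
	/*
	 * Poison the whole buffer with 0xff: any byte past the synthesized
	 * event keeps that value, which is what the size check below looks for.
	 */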
	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __evsel__sample_size(sample_type);

	err = evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample.  The test
 * checks sample format bits separately and together.  If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
{
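	/*
	 * read_format combinations to exercise: 4-7 are PERF_FORMAT_ID plus the
	 * TOTAL_TIME_ENABLED/RUNNING bits, 12-15 add PERF_FORMAT_GROUP on top
	 * (assuming the usual perf_event_attr.read_format bit layout).
	 */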
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added.  Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_CODE_PAGE_SIZE << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}