tools/perf/arch/x86/annotate/instructions.c
// SPDX-License-Identifier: GPL-2.0
/*
 * x86 instruction mnemonic table used to parse disasm lines for annotate.
 * This table is searched twice - once for an exact match and again for a
 * match without a size suffix (b, w, l, q) in case of AT&T syntax.
 *
 * So this table should not have entries with a suffix unless it is a
 * completely different instruction from the one without the suffix.
 */
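/*
 * Illustrative example (not part of the original source): for the AT&T
 * line "addq $0x8,%rsp", the first pass looks up "addq" and finds no
 * entry, so the second pass strips the 'q' size suffix and matches the
 * "add" entry below, whose mov_ops parser then splits the operands.
 */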
static struct ins x86__instructions[] = {
	{ .name = "adc",	.ops = &mov_ops, },
	{ .name = "add",	.ops = &mov_ops, },
	{ .name = "addsd",	.ops = &mov_ops, },
	{ .name = "and",	.ops = &mov_ops, },
	{ .name = "andpd",	.ops = &mov_ops, },
	{ .name = "andps",	.ops = &mov_ops, },
	{ .name = "bsr",	.ops = &mov_ops, },
	{ .name = "bt",		.ops = &mov_ops, },
	{ .name = "btr",	.ops = &mov_ops, },
	{ .name = "bts",	.ops = &mov_ops, },
	{ .name = "call",	.ops = &call_ops, },
	{ .name = "cmovbe",	.ops = &mov_ops, },
	{ .name = "cmove",	.ops = &mov_ops, },
	{ .name = "cmovae",	.ops = &mov_ops, },
	{ .name = "cmp",	.ops = &mov_ops, },
	{ .name = "cmpxch",	.ops = &mov_ops, },
	{ .name = "cmpxchg",	.ops = &mov_ops, },
	{ .name = "cs",		.ops = &mov_ops, },
	{ .name = "dec",	.ops = &dec_ops, },
	{ .name = "divsd",	.ops = &mov_ops, },
	{ .name = "divss",	.ops = &mov_ops, },
	{ .name = "gs",		.ops = &mov_ops, },
	{ .name = "imul",	.ops = &mov_ops, },
	{ .name = "inc",	.ops = &dec_ops, },
	{ .name = "ja",		.ops = &jump_ops, },
	{ .name = "jae",	.ops = &jump_ops, },
	{ .name = "jb",		.ops = &jump_ops, },
	{ .name = "jbe",	.ops = &jump_ops, },
	{ .name = "jc",		.ops = &jump_ops, },
	{ .name = "jcxz",	.ops = &jump_ops, },
	{ .name = "je",		.ops = &jump_ops, },
	{ .name = "jecxz",	.ops = &jump_ops, },
	{ .name = "jg",		.ops = &jump_ops, },
	{ .name = "jge",	.ops = &jump_ops, },
	{ .name = "jl",		.ops = &jump_ops, },
	{ .name = "jle",	.ops = &jump_ops, },
	{ .name = "jmp",	.ops = &jump_ops, },
	{ .name = "jna",	.ops = &jump_ops, },
	{ .name = "jnae",	.ops = &jump_ops, },
	{ .name = "jnb",	.ops = &jump_ops, },
	{ .name = "jnbe",	.ops = &jump_ops, },
	{ .name = "jnc",	.ops = &jump_ops, },
	{ .name = "jne",	.ops = &jump_ops, },
	{ .name = "jng",	.ops = &jump_ops, },
	{ .name = "jnge",	.ops = &jump_ops, },
	{ .name = "jnl",	.ops = &jump_ops, },
	{ .name = "jnle",	.ops = &jump_ops, },
	{ .name = "jno",	.ops = &jump_ops, },
	{ .name = "jnp",	.ops = &jump_ops, },
	{ .name = "jns",	.ops = &jump_ops, },
	{ .name = "jnz",	.ops = &jump_ops, },
	{ .name = "jo",		.ops = &jump_ops, },
	{ .name = "jp",		.ops = &jump_ops, },
	{ .name = "jpe",	.ops = &jump_ops, },
	{ .name = "jpo",	.ops = &jump_ops, },
	{ .name = "jrcxz",	.ops = &jump_ops, },
	{ .name = "js",		.ops = &jump_ops, },
	{ .name = "jz",		.ops = &jump_ops, },
	{ .name = "lea",	.ops = &mov_ops, },
	{ .name = "lock",	.ops = &lock_ops, },
	{ .name = "mov",	.ops = &mov_ops, },
	{ .name = "movapd",	.ops = &mov_ops, },
	{ .name = "movaps",	.ops = &mov_ops, },
	{ .name = "movdqa",	.ops = &mov_ops, },
	{ .name = "movdqu",	.ops = &mov_ops, },
	{ .name = "movsd",	.ops = &mov_ops, },
	{ .name = "movss",	.ops = &mov_ops, },
	{ .name = "movsb",	.ops = &mov_ops, },
	{ .name = "movsw",	.ops = &mov_ops, },
	{ .name = "movsl",	.ops = &mov_ops, },
	{ .name = "movupd",	.ops = &mov_ops, },
	{ .name = "movups",	.ops = &mov_ops, },
	{ .name = "movzb",	.ops = &mov_ops, },
	{ .name = "movzw",	.ops = &mov_ops, },
	{ .name = "movzl",	.ops = &mov_ops, },
	{ .name = "mulsd",	.ops = &mov_ops, },
	{ .name = "mulss",	.ops = &mov_ops, },
	{ .name = "nop",	.ops = &nop_ops, },
	{ .name = "or",		.ops = &mov_ops, },
	{ .name = "orps",	.ops = &mov_ops, },
	{ .name = "pand",	.ops = &mov_ops, },
	{ .name = "paddq",	.ops = &mov_ops, },
	{ .name = "pcmpeqb",	.ops = &mov_ops, },
	{ .name = "por",	.ops = &mov_ops, },
	{ .name = "rcl",	.ops = &mov_ops, },
	{ .name = "ret",	.ops = &ret_ops, },
	{ .name = "sbb",	.ops = &mov_ops, },
	{ .name = "sete",	.ops = &mov_ops, },
	{ .name = "sub",	.ops = &mov_ops, },
	{ .name = "subsd",	.ops = &mov_ops, },
	{ .name = "test",	.ops = &mov_ops, },
	{ .name = "tzcnt",	.ops = &mov_ops, },
	{ .name = "ucomisd",	.ops = &mov_ops, },
	{ .name = "ucomiss",	.ops = &mov_ops, },
	{ .name = "vaddsd",	.ops = &mov_ops, },
	{ .name = "vandpd",	.ops = &mov_ops, },
	{ .name = "vmovdqa",	.ops = &mov_ops, },
	{ .name = "vmovq",	.ops = &mov_ops, },
	{ .name = "vmovsd",	.ops = &mov_ops, },
	{ .name = "vmulsd",	.ops = &mov_ops, },
	{ .name = "vorpd",	.ops = &mov_ops, },
	{ .name = "vsubsd",	.ops = &mov_ops, },
	{ .name = "vucomisd",	.ops = &mov_ops, },
	{ .name = "xadd",	.ops = &mov_ops, },
	{ .name = "xbegin",	.ops = &jump_ops, },
	{ .name = "xchg",	.ops = &mov_ops, },
	{ .name = "xor",	.ops = &mov_ops, },
	{ .name = "xorpd",	.ops = &mov_ops, },
	{ .name = "xorps",	.ops = &mov_ops, },
};

static bool amd__ins_is_fused(struct arch *arch, const char *ins1,
			      const char *ins2)
{
	if (strstr(ins2, "jmp"))
		return false;

	/* Family >= 15h supports cmp/test + branch fusion */
	if (arch->family >= 0x15 && (strstarts(ins1, "test") ||
	     (strstarts(ins1, "cmp") && !strstr(ins1, "xchg")))) {
		return true;
	}

	/* Family >= 19h supports some ALU + branch fusion */
	if (arch->family >= 0x19 && (strstarts(ins1, "add") ||
	     strstarts(ins1, "sub") || strstarts(ins1, "and") ||
	     strstarts(ins1, "inc") || strstarts(ins1, "dec") ||
	     strstarts(ins1, "or") || strstarts(ins1, "xor"))) {
		return true;
	}

	return false;
}
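
/*
 * Illustrative example (not in the original source): with arch->family
 * == 0x19 (Zen 3/4), "add $1,%rax" followed by "jnz ..." is reported
 * as fused, while on family 0x15 only the cmp/test pairs would be; the
 * "xchg" check keeps cmpxchg from matching the cmp prefix.
 */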

static bool intel__ins_is_fused(struct arch *arch, const char *ins1,
				const char *ins2)
{
	if (arch->family != 6 || arch->model < 0x1e || strstr(ins2, "jmp"))
		return false;

	if (arch->model == 0x1e) {
		/* Nehalem */
		if ((strstr(ins1, "cmp") && !strstr(ins1, "xchg")) ||
		     strstr(ins1, "test")) {
			return true;
		}
	} else {
		/* Newer platform */
		if ((strstr(ins1, "cmp") && !strstr(ins1, "xchg")) ||
		     strstr(ins1, "test") ||
		     strstr(ins1, "add") ||
		     strstr(ins1, "sub") ||
		     strstr(ins1, "and") ||
		     strstr(ins1, "inc") ||
		     strstr(ins1, "dec")) {
			return true;
		}
	}

	return false;
}
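
/*
 * Illustrative example (not in the original source): on model 0x1e
 * (Nehalem) only cmp/test fuse with a following conditional branch;
 * on newer family-6 models "dec %ecx; jne ..." also counts as fused.
 */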

static int x86__cpuid_parse(struct arch *arch, char *cpuid)
{
	unsigned int family, model, stepping;
	int ret;

	/*
	 * cpuid = "GenuineIntel,family,model,stepping"
	 */
	ret = sscanf(cpuid, "%*[^,],%u,%u,%u", &family, &model, &stepping);
	if (ret == 3) {
		arch->family = family;
		arch->model = model;
		arch->ins_is_fused = strstarts(cpuid, "AuthenticAMD") ?
					amd__ins_is_fused :
					intel__ins_is_fused;
		return 0;
	}

	return -1;
}
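
/*
 * Illustrative example (not in the original source): the cpuid string
 * "GenuineIntel,6,158,10" parses to family=6, model=158 (0x9e) and
 * installs intel__ins_is_fused; an "AuthenticAMD,..." string would
 * install amd__ins_is_fused instead.
 */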

static int x86__annotate_init(struct arch *arch, char *cpuid)
{
	int err = 0;

	if (arch->initialized)
		return 0;

	if (cpuid) {
		if (x86__cpuid_parse(arch, cpuid))
			err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING;
	}
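	/*
	 * Note (added for clarity): a cpuid parse failure is recorded in
	 * err but does not abort initialization, so annotation still
	 * works without the fusion callbacks.
	 */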
	arch->e_machine = EM_X86_64;
	arch->e_flags = 0;
	arch->initialized = true;
	return err;
}

#ifdef HAVE_LIBDW_SUPPORT
static void update_insn_state_x86(struct type_state *state,
				  struct data_loc_info *dloc, Dwarf_Die *cu_die,
				  struct disasm_line *dl)
{
	struct annotated_insn_loc loc;
	struct annotated_op_loc *src = &loc.ops[INSN_OP_SOURCE];
	struct annotated_op_loc *dst = &loc.ops[INSN_OP_TARGET];
	struct type_state_reg *tsr;
	Dwarf_Die type_die;
	u32 insn_offset = dl->al.offset;
	int fbreg = dloc->fbreg;
	int fboff = 0;

	if (annotate_get_insn_location(dloc->arch, dl, &loc) < 0)
		return;

	if (ins__is_call(&dl->ins)) {
		struct symbol *func = dl->ops.target.sym;

		if (func == NULL)
			return;

		/* __fentry__ will preserve all registers */
		if (!strcmp(func->name, "__fentry__"))
			return;

		pr_debug_dtp("call [%x] %s\n", insn_offset, func->name);

		/* Otherwise invalidate caller-saved registers after call */
		for (unsigned i = 0; i < ARRAY_SIZE(state->regs); i++) {
			if (state->regs[i].caller_saved)
				state->regs[i].ok = false;
		}

		/* Update register with the return type (if any) */
		if (die_find_func_rettype(cu_die, func->name, &type_die)) {
			tsr = &state->regs[state->ret_reg];
			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->ok = true;

			pr_debug_dtp("call [%x] return -> reg%d",
				     insn_offset, state->ret_reg);
			pr_debug_type_name(&type_die, tsr->kind);
		}
		return;
	}
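
	/*
	 * Illustrative example (not in the original source): after
	 * "call malloc", the SysV x86-64 caller-saved registers (rax,
	 * rcx, rdx, rsi, rdi, r8-r11) are invalidated, then the return
	 * register gets malloc's DWARF return type, "void *".
	 */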

	if (!strncmp(dl->ins.name, "add", 3)) {
		u64 imm_value = -1ULL;
		int offset;
		const char *var_name = NULL;
		struct map_symbol *ms = dloc->ms;
		u64 ip = ms->sym->start + dl->al.offset;

		if (!has_reg_type(state, dst->reg1))
			return;

		tsr = &state->regs[dst->reg1];
		tsr->copied_from = -1;

		if (src->imm)
			imm_value = src->offset;
		else if (has_reg_type(state, src->reg1) &&
			 state->regs[src->reg1].kind == TSR_KIND_CONST)
			imm_value = state->regs[src->reg1].imm_value;
		else if (src->reg1 == DWARF_REG_PC) {
			u64 var_addr = annotate_calc_pcrel(dloc->ms, ip,
							   src->offset, dl);

			if (get_global_var_info(dloc, var_addr,
						&var_name, &offset) &&
			    !strcmp(var_name, "this_cpu_off") &&
			    tsr->kind == TSR_KIND_CONST) {
				tsr->kind = TSR_KIND_PERCPU_BASE;
				tsr->ok = true;
				imm_value = tsr->imm_value;
			}
		}
		else
			return;

		if (tsr->kind != TSR_KIND_PERCPU_BASE)
			return;

		if (get_global_var_type(cu_die, dloc, ip, imm_value, &offset,
					&type_die) && offset == 0) {
			/*
			 * This is not a pointer type, but it should be
			 * treated as a pointer.
			 */
			tsr->type = type_die;
			tsr->kind = TSR_KIND_POINTER;
			tsr->ok = true;

			pr_debug_dtp("add [%x] percpu %#"PRIx64" -> reg%d",
				     insn_offset, imm_value, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		return;
	}
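
	/*
	 * Illustrative example (not in the original source): if a register
	 * already holds the per-cpu base (loaded via this_cpu_off), then
	 * "add $<per-cpu var address>, %reg" turns it into a typed pointer
	 * to that per-cpu variable.
	 */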

	if (strncmp(dl->ins.name, "mov", 3))
		return;

	if (dloc->fb_cfa) {
		u64 ip = dloc->ms->sym->start + dl->al.offset;
		u64 pc = map__rip_2objdump(dloc->ms->map, ip);

		if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0)
			fbreg = -1;
	}

	/* Case 1. register to register or segment:offset to register transfers */
	if (!src->mem_ref && !dst->mem_ref) {
		if (!has_reg_type(state, dst->reg1))
			return;

		tsr = &state->regs[dst->reg1];
		tsr->copied_from = -1;

		if (dso__kernel(map__dso(dloc->ms->map)) &&
		    src->segment == INSN_SEG_X86_GS && src->imm) {
			u64 ip = dloc->ms->sym->start + dl->al.offset;
			u64 var_addr;
			int offset;

			/*
			 * In kernel, %gs points to a per-cpu region for the
			 * current CPU.  Access with a constant offset should
			 * be treated as a global variable access.
			 */
			var_addr = src->offset;

			if (var_addr == 40) {
				tsr->kind = TSR_KIND_CANARY;
				tsr->ok = true;

				pr_debug_dtp("mov [%x] stack canary -> reg%d\n",
					     insn_offset, dst->reg1);
				return;
			}
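			/*
			 * Note (added for clarity): offset 40 (0x28) in the
			 * kernel's %gs-based per-cpu area holds the stack
			 * protector canary, hence the special kind above.
			 */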
			if (!get_global_var_type(cu_die, dloc, ip, var_addr,
						 &offset, &type_die) ||
			    !die_get_member_type(&type_die, offset, &type_die)) {
				tsr->ok = false;
				return;
			}

			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] this-cpu addr=%#"PRIx64" -> reg%d",
				     insn_offset, var_addr, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
			return;
		}

		if (src->imm) {
			tsr->kind = TSR_KIND_CONST;
			tsr->imm_value = src->offset;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] imm=%#x -> reg%d\n",
				     insn_offset, tsr->imm_value, dst->reg1);
			return;
		}

		if (!has_reg_type(state, src->reg1) ||
		    !state->regs[src->reg1].ok) {
			tsr->ok = false;
			return;
		}

		tsr->type = state->regs[src->reg1].type;
		tsr->kind = state->regs[src->reg1].kind;
		tsr->imm_value = state->regs[src->reg1].imm_value;
		tsr->ok = true;

		/* To copy back the variable type later (hopefully) */
		if (tsr->kind == TSR_KIND_TYPE)
			tsr->copied_from = src->reg1;

		pr_debug_dtp("mov [%x] reg%d -> reg%d",
			     insn_offset, src->reg1, dst->reg1);
		pr_debug_type_name(&tsr->type, tsr->kind);
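
		/*
		 * Illustrative example (not in the original source): for
		 * "mov %rdi, %rbx" where rdi is known to hold a
		 * "struct task_struct *", rbx inherits that type and
		 * copied_from records rdi so the type can be copied back.
		 */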
	}
	/* Case 2. memory to register transfers */
	if (src->mem_ref && !dst->mem_ref) {
		int sreg = src->reg1;

		if (!has_reg_type(state, dst->reg1))
			return;

		tsr = &state->regs[dst->reg1];
		tsr->copied_from = -1;

retry:
		/* Check stack variables with offset */
		if (sreg == fbreg) {
			struct type_state_stack *stack;
			int offset = src->offset - fboff;

			stack = find_stack_state(state, offset);
			if (stack == NULL) {
				tsr->ok = false;
				return;
			} else if (!stack->compound) {
				tsr->type = stack->type;
				tsr->kind = stack->kind;
				tsr->ok = true;
			} else if (die_get_member_type(&stack->type,
						       offset - stack->offset,
						       &type_die)) {
				tsr->type = type_die;
				tsr->kind = TSR_KIND_TYPE;
				tsr->ok = true;
			} else {
				tsr->ok = false;
				return;
			}

			pr_debug_dtp("mov [%x] -%#x(stack) -> reg%d",
				     insn_offset, -offset, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* And then dereference the pointer if it has one */
		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
			 state->regs[sreg].kind == TSR_KIND_TYPE &&
			 die_deref_ptr_type(&state->regs[sreg].type,
					    src->offset, &type_die)) {
			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
				     insn_offset, src->offset, sreg, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* Or check if it's a global variable */
		else if (sreg == DWARF_REG_PC) {
			struct map_symbol *ms = dloc->ms;
			u64 ip = ms->sym->start + dl->al.offset;
			u64 addr;
			int offset;

			addr = annotate_calc_pcrel(ms, ip, src->offset, dl);

			if (!get_global_var_type(cu_die, dloc, ip, addr, &offset,
						 &type_die) ||
			    !die_get_member_type(&type_die, offset, &type_die)) {
				tsr->ok = false;
				return;
			}

			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] global addr=%"PRIx64" -> reg%d",
				     insn_offset, addr, dst->reg1);
			pr_debug_type_name(&type_die, tsr->kind);
		}
		/* And check percpu access with base register */
		else if (has_reg_type(state, sreg) &&
			 state->regs[sreg].kind == TSR_KIND_PERCPU_BASE) {
			u64 ip = dloc->ms->sym->start + dl->al.offset;
			u64 var_addr = src->offset;
			int offset;

			if (src->multi_regs) {
				int reg2 = (sreg == src->reg1) ? src->reg2 : src->reg1;

				if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
				    state->regs[reg2].kind == TSR_KIND_CONST)
					var_addr += state->regs[reg2].imm_value;
			}

			/*
			 * In kernel, %gs points to a per-cpu region for the
			 * current CPU.  Access with a constant offset should
			 * be treated as a global variable access.
			 */
			if (get_global_var_type(cu_die, dloc, ip, var_addr,
						&offset, &type_die) &&
			    die_get_member_type(&type_die, offset, &type_die)) {
				tsr->type = type_die;
				tsr->kind = TSR_KIND_TYPE;
				tsr->ok = true;

				if (src->multi_regs) {
					pr_debug_dtp("mov [%x] percpu %#x(reg%d,reg%d) -> reg%d",
						     insn_offset, src->offset, src->reg1,
						     src->reg2, dst->reg1);
				} else {
					pr_debug_dtp("mov [%x] percpu %#x(reg%d) -> reg%d",
						     insn_offset, src->offset, sreg, dst->reg1);
				}
				pr_debug_type_name(&tsr->type, tsr->kind);
			} else {
				tsr->ok = false;
			}
		}
		/* And then dereference the calculated pointer if it has one */
		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
			 state->regs[sreg].kind == TSR_KIND_POINTER &&
			 die_get_member_type(&state->regs[sreg].type,
					     src->offset, &type_die)) {
			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] pointer %#x(reg%d) -> reg%d",
				     insn_offset, src->offset, sreg, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* Or try another register if any */
		else if (src->multi_regs && sreg == src->reg1 &&
			 src->reg1 != src->reg2) {
			sreg = src->reg2;
			goto retry;
		}
		else {
			int offset;
			const char *var_name = NULL;

			/* it might be per-cpu variable (in kernel) access */
			if (src->offset < 0) {
				if (get_global_var_info(dloc, (s64)src->offset,
							&var_name, &offset) &&
				    !strcmp(var_name, "__per_cpu_offset")) {
					tsr->kind = TSR_KIND_PERCPU_BASE;
					tsr->ok = true;

					pr_debug_dtp("mov [%x] percpu base reg%d\n",
						     insn_offset, dst->reg1);
					return;
				}
			}

			tsr->ok = false;
		}
	}
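
	/*
	 * Illustrative example for Case 2 (not in the original source):
	 * for "mov 0x8(%rax,%rbx,1), %rcx", if %rax does not resolve to
	 * a known type, the retry path above re-runs the checks with
	 * sreg = %rbx before giving up.
	 */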
	/* Case 3. register to memory transfers */
	if (!src->mem_ref && dst->mem_ref) {
		if (!has_reg_type(state, src->reg1) ||
		    !state->regs[src->reg1].ok)
			return;

		/* Check stack variables with offset */
		if (dst->reg1 == fbreg) {
			struct type_state_stack *stack;
			int offset = dst->offset - fboff;

			tsr = &state->regs[src->reg1];

			stack = find_stack_state(state, offset);
			if (stack) {
				/*
				 * The source register is likely to hold a type
				 * of member if it's a compound type.  Do not
				 * update the stack variable type since we can
				 * get the member type later by using the
				 * die_get_member_type().
				 */
				if (!stack->compound)
					set_stack_state(stack, offset, tsr->kind,
							&tsr->type);
			} else {
				findnew_stack_state(state, offset, tsr->kind,
						    &tsr->type);
			}

			pr_debug_dtp("mov [%x] reg%d -> -%#x(stack)",
				     insn_offset, src->reg1, -offset);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/*
		 * Ignore other transfers since it'd set a value in a struct
		 * and won't change the type.
		 */
	}
	/* Case 4. memory to memory transfers (not handled for now) */
}
#endif